first commit

Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions


@@ -0,0 +1,24 @@
"""
Import all submodules main classes into the package space
"""
# flake8: noqa
import inspect
from .base import *
from .blob import *
from .commit import *
from .submodule import util as smutil
from .submodule.base import *
from .submodule.root import *
from .tag import *
from .tree import *
# Fix import dependency - add IndexObject to the util module, so that it can be
# imported by the submodule.base
smutil.IndexObject = IndexObject # type: ignore[attr-defined]
smutil.Object = Object # type: ignore[attr-defined]
del smutil
# must come after submodule was made available
__all__ = [name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj))]
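The comprehension above is a common pattern for computing __all__ dynamically. A minimal standalone sketch of the same idea (the module and class names below are hypothetical, not part of the package):

import inspect

some_module = inspect      # stands in for a submodule pulled in by an import

class PublicThing:         # stands in for a class pulled in via a star import
    pass

_private = 42

# keep every public, non-module name defined so far
__all__ = [name for name, obj in locals().items()
           if not (name.startswith('_') or inspect.ismodule(obj))]
# __all__ == ['PublicThing'] - modules and underscore names are filtered out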


@@ -0,0 +1,208 @@
# base.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.exc import WorkTreeRepositoryUnsupported
from git.util import LazyMixin, join_path_native, stream_copy, bin_to_hex
import gitdb.typ as dbtyp
import os.path as osp
from .util import get_object_type_by_name
# typing ------------------------------------------------------------------
from typing import Any, TYPE_CHECKING, Union
from git.types import PathLike, Commit_ish, Lit_commit_ish
if TYPE_CHECKING:
from git.repo import Repo
from gitdb.base import OStream
from .tree import Tree
from .blob import Blob
from .submodule.base import Submodule
from git.refs.reference import Reference
IndexObjUnion = Union['Tree', 'Blob', 'Submodule']
# --------------------------------------------------------------------------
_assertion_msg_format = "Created object %r whose python type %r disagrees with the actual git object type %r"
__all__ = ("Object", "IndexObject")
class Object(LazyMixin):
"""Implements an Object which may be Blobs, Trees, Commits and Tags"""
NULL_HEX_SHA = '0' * 40
NULL_BIN_SHA = b'\0' * 20
TYPES = (dbtyp.str_blob_type, dbtyp.str_tree_type, dbtyp.str_commit_type, dbtyp.str_tag_type)
__slots__ = ("repo", "binsha", "size")
type: Union[Lit_commit_ish, None] = None
def __init__(self, repo: 'Repo', binsha: bytes):
"""Initialize an object by identifying it by its binary sha.
All keyword arguments will be set on demand if None.
:param repo: repository this object is located in
:param binsha: 20 byte SHA1"""
super(Object, self).__init__()
self.repo = repo
self.binsha = binsha
assert len(binsha) == 20, "Require 20 byte binary sha, got %r, len = %i" % (binsha, len(binsha))
@classmethod
def new(cls, repo: 'Repo', id: Union[str, 'Reference']) -> Commit_ish:
"""
:return: New Object instance of a type appropriate to the object type behind
id. The id of the newly created object will be a binsha even though
the input id may have been a Reference or Rev-Spec
:param id: reference, rev-spec, or hexsha
:note: This cannot be a __new__ method as it would always call __init__
with the input id which is not necessarily a binsha."""
return repo.rev_parse(str(id))
@classmethod
def new_from_sha(cls, repo: 'Repo', sha1: bytes) -> Commit_ish:
"""
:return: new object instance of a type appropriate to represent the given
binary sha1
:param sha1: 20 byte binary sha1"""
if sha1 == cls.NULL_BIN_SHA:
# the NULL binsha is always the root commit
return get_object_type_by_name(b'commit')(repo, sha1)
# END handle special case
oinfo = repo.odb.info(sha1)
inst = get_object_type_by_name(oinfo.type)(repo, oinfo.binsha)
inst.size = oinfo.size
return inst
def _set_cache_(self, attr: str) -> None:
"""Retrieve object information"""
if attr == "size":
oinfo = self.repo.odb.info(self.binsha)
self.size = oinfo.size # type: int
# assert oinfo.type == self.type, _assertion_msg_format % (self.binsha, oinfo.type, self.type)
else:
super(Object, self)._set_cache_(attr)
def __eq__(self, other: Any) -> bool:
""":return: True if the objects have the same SHA1"""
if not hasattr(other, 'binsha'):
return False
return self.binsha == other.binsha
def __ne__(self, other: Any) -> bool:
""":return: True if the objects do not have the same SHA1 """
if not hasattr(other, 'binsha'):
return True
return self.binsha != other.binsha
def __hash__(self) -> int:
""":return: Hash of our id allowing objects to be used in dicts and sets"""
return hash(self.binsha)
def __str__(self) -> str:
""":return: string of our SHA1 as understood by all git commands"""
return self.hexsha
def __repr__(self) -> str:
""":return: string with pythonic representation of our object"""
return '<git.%s "%s">' % (self.__class__.__name__, self.hexsha)
@property
def hexsha(self) -> str:
""":return: 40 byte hex version of our 20 byte binary sha"""
# b2a_hex produces bytes
return bin_to_hex(self.binsha).decode('ascii')
@property
def data_stream(self) -> 'OStream':
""" :return: File Object compatible stream to the uncompressed raw data of the object
:note: returned streams must be read in order"""
return self.repo.odb.stream(self.binsha)
def stream_data(self, ostream: 'OStream') -> 'Object':
"""Writes our data directly to the given output stream
:param ostream: File object compatible stream object.
:return: self"""
istream = self.repo.odb.stream(self.binsha)
stream_copy(istream, ostream)
return self
class IndexObject(Object):
"""Base for all objects that can be part of the index file , namely Tree, Blob and
SubModule objects"""
__slots__ = ("path", "mode")
# for compatibility with iterable lists
_id_attribute_ = 'path'
def __init__(self,
repo: 'Repo', binsha: bytes, mode: Union[None, int] = None, path: Union[None, PathLike] = None
) -> None:
"""Initialize a newly instanced IndexObject
:param repo: is the Repo we are located in
:param binsha: 20 byte sha1
:param mode:
is the stat compatible file mode as int, use the stat module
to evaluate the information
:param path:
is the path to the file in the file system, relative to the git repository root, i.e.
file.ext or folder/other.ext
:note:
Path may not be set if the index object has been created directly, as it cannot
be retrieved without knowing the parent tree."""
super(IndexObject, self).__init__(repo, binsha)
if mode is not None:
self.mode = mode
if path is not None:
self.path = path
def __hash__(self) -> int:
"""
:return:
Hash of our path as index items are uniquely identifiable by path, not
by their data!"""
return hash(self.path)
def _set_cache_(self, attr: str) -> None:
if attr in IndexObject.__slots__:
# they cannot be retrieved later on (not without searching for them)
raise AttributeError(
"Attribute '%s' unset: path and mode attributes must have been set during %s object creation"
% (attr, type(self).__name__))
else:
super(IndexObject, self)._set_cache_(attr)
# END handle slot attribute
@property
def name(self) -> str:
""":return: Name portion of the path, effectively being the basename"""
return osp.basename(self.path)
@property
def abspath(self) -> PathLike:
"""
:return:
Absolute path to this index object in the file system (as opposed to the
.path field, which is a path relative to the git repository).
The returned path will be native to the system and will contain '\\' on Windows."""
if self.repo.working_tree_dir is not None:
return join_path_native(self.repo.working_tree_dir, self.path)
else:
raise WorkTreeRepositoryUnsupported("Working_tree_dir was None or empty")
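To put the API above in context, a hedged usage sketch follows; the repository path and file name are hypothetical, and Repo comes from the package's top level:

from git import Repo

repo = Repo('/path/to/repo')           # hypothetical repository
commit = repo.head.commit              # an Object subclass (Commit)

print(commit.hexsha)                   # 40-char hex form of the 20-byte binsha
print(commit.size)                     # fetched lazily via _set_cache_('size')

blob = commit.tree['README.md']        # an IndexObject (Blob), identified by path
print(blob.name)                       # basename portion of .path
print(blob.abspath)                    # absolute, native path in the working tree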


@@ -0,0 +1,35 @@
# blob.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from mimetypes import guess_type
from . import base
from git.types import Literal
__all__ = ('Blob', )
class Blob(base.IndexObject):
"""A Blob encapsulates a git blob object"""
DEFAULT_MIME_TYPE = "text/plain"
type: Literal['blob'] = "blob"
# valid blob modes
executable_mode = 0o100755
file_mode = 0o100644
link_mode = 0o120000
__slots__ = ()
@property
def mime_type(self) -> str:
"""
:return: String describing the mime type of this file (based on the filename)
:note: Defaults to 'text/plain' in case the actual file type is unknown. """
guesses = None
if self.path:
guesses = guess_type(str(self.path))
return guesses and guesses[0] or self.DEFAULT_MIME_TYPE
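A short, hedged sketch of mime_type in use; the repository and file names are hypothetical:

from git import Repo

tree = Repo('/path/to/repo').head.commit.tree
print(tree['setup.py'].mime_type)   # 'text/x-python', guessed from the name
print(tree['LICENSE'].mime_type)    # 'text/plain' - DEFAULT_MIME_TYPE fallback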


@@ -0,0 +1,663 @@
# commit.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import datetime
from subprocess import Popen, PIPE
from gitdb import IStream
from git.util import (
hex_to_bin,
Actor,
Stats,
finalize_process
)
from git.diff import Diffable
from git.cmd import Git
from .tree import Tree
from . import base
from .util import (
Serializable,
TraversableIterableObj,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from time import (
time,
daylight,
altzone,
timezone,
localtime
)
import os
from io import BytesIO
import logging
# typing ------------------------------------------------------------------
from typing import Any, IO, Iterator, List, Sequence, Tuple, Union, TYPE_CHECKING, cast, Dict
from git.types import PathLike, Literal
if TYPE_CHECKING:
from git.repo import Repo
from git.refs import SymbolicReference
# ------------------------------------------------------------------------
log = logging.getLogger('git.objects.commit')
log.addHandler(logging.NullHandler())
__all__ = ('Commit', )
class Commit(base.Object, TraversableIterableObj, Diffable, Serializable):
"""Wraps a git Commit object.
This class will act lazily on some of its attributes and will query the
value on demand only if it involves calling the git binary."""
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_date = "GIT_AUTHOR_DATE"
env_committer_date = "GIT_COMMITTER_DATE"
# CONFIGURATION KEYS
conf_encoding = 'i18n.commitencoding'
# INVARIANTS
default_encoding = "UTF-8"
# object configuration
type: Literal['commit'] = "commit"
__slots__ = ("tree",
"author", "authored_date", "author_tz_offset",
"committer", "committed_date", "committer_tz_offset",
"message", "parents", "encoding", "gpgsig")
_id_attribute_ = "hexsha"
def __init__(self, repo: 'Repo', binsha: bytes, tree: Union[Tree, None] = None,
author: Union[Actor, None] = None,
authored_date: Union[int, None] = None,
author_tz_offset: Union[None, float] = None,
committer: Union[Actor, None] = None,
committed_date: Union[int, None] = None,
committer_tz_offset: Union[None, float] = None,
message: Union[str, bytes, None] = None,
parents: Union[Sequence['Commit'], None] = None,
encoding: Union[str, None] = None,
gpgsig: Union[str, None] = None) -> None:
"""Instantiate a new Commit. All keyword arguments taking None as default will
be implicitly set on first query.
:param binsha: 20 byte sha1
:param parents: tuple( Commit, ... )
is a tuple of commit ids or actual Commits
:param tree: Tree object
:param author: Actor
is the author Actor object
:param authored_date: int_seconds_since_epoch
is the authored DateTime - use time.gmtime() to convert it into a
different format
:param author_tz_offset: int_seconds_west_of_utc
is the timezone that the authored_date is in
:param committer: Actor
is the committer Actor object
:param committed_date: int_seconds_since_epoch
is the committed DateTime - use time.gmtime() to convert it into a
different format
:param committer_tz_offset: int_seconds_west_of_utc
is the timezone that the committed_date is in
:param message: string
is the commit message
:param encoding: string
encoding of the message, defaults to UTF-8
:param parents:
List or tuple of Commit objects which are our parent(s) in the commit
dependency graph
:return: git.Commit
:note:
Timezone information is in the same format and in the same sign
as what time.altzone returns. The sign is inverted compared to git's
UTC timezone."""
super(Commit, self).__init__(repo, binsha)
self.binsha = binsha
if tree is not None:
    assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
    self.tree = tree
if author is not None:
self.author = author
if authored_date is not None:
self.authored_date = authored_date
if author_tz_offset is not None:
self.author_tz_offset = author_tz_offset
if committer is not None:
self.committer = committer
if committed_date is not None:
self.committed_date = committed_date
if committer_tz_offset is not None:
self.committer_tz_offset = committer_tz_offset
if message is not None:
self.message = message
if parents is not None:
self.parents = parents
if encoding is not None:
self.encoding = encoding
if gpgsig is not None:
self.gpgsig = gpgsig
@classmethod
def _get_intermediate_items(cls, commit: 'Commit') -> Tuple['Commit', ...]:
return tuple(commit.parents)
@classmethod
def _calculate_sha_(cls, repo: 'Repo', commit: 'Commit') -> bytes:
'''Calculate the sha of a commit.
:param repo: Repo object the commit should be part of
:param commit: Commit object for which to generate the sha
'''
stream = BytesIO()
commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
return istream.binsha
def replace(self, **kwargs: Any) -> 'Commit':
'''Create new commit object from existing commit object.
Any values provided as keyword arguments will replace the
corresponding attribute in the new object.
'''
attrs = {k: getattr(self, k) for k in self.__slots__}
for attrname in kwargs:
if attrname not in self.__slots__:
raise ValueError('invalid attribute name')
attrs.update(kwargs)
new_commit = self.__class__(self.repo, self.NULL_BIN_SHA, **attrs)
new_commit.binsha = self._calculate_sha_(self.repo, new_commit)
return new_commit
def _set_cache_(self, attr: str) -> None:
if attr in Commit.__slots__:
# read the data in a chunk, it's faster - then provide a file wrapper
_binsha, _typename, self.size, stream = self.repo.odb.stream(self.binsha)
self._deserialize(BytesIO(stream.read()))
else:
super(Commit, self)._set_cache_(attr)
# END handle attrs
@property
def authored_datetime(self) -> datetime.datetime:
return from_timestamp(self.authored_date, self.author_tz_offset)
@property
def committed_datetime(self) -> datetime.datetime:
return from_timestamp(self.committed_date, self.committer_tz_offset)
@property
def summary(self) -> Union[str, bytes]:
""":return: First line of the commit message"""
if isinstance(self.message, str):
return self.message.split('\n', 1)[0]
else:
return self.message.split(b'\n', 1)[0]
def count(self, paths: Union[PathLike, Sequence[PathLike]] = '', **kwargs: Any) -> int:
"""Count the number of commits reachable from this commit
:param paths:
is an optional path or a list of paths restricting the return value
to commits actually containing the paths
:param kwargs:
Additional options to be passed to git-rev-list. They must not alter
the output style of the command, or parsing will yield incorrect results
:return: int defining the number of reachable commits"""
# yes, it makes a difference whether empty paths are given or not in our case
# as the empty paths version will ignore merge commits for some reason.
if paths:
return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
@property
def name_rev(self) -> str:
"""
:return:
String describing the commits hex sha based on the closest Reference.
Mostly useful for UI purposes"""
return self.repo.git.name_rev(self)
@classmethod
def iter_items(cls, repo: 'Repo', rev: Union[str, 'Commit', 'SymbolicReference'], # type: ignore
paths: Union[PathLike, Sequence[PathLike]] = '', **kwargs: Any
) -> Iterator['Commit']:
"""Find all commits matching the given criteria.
:param repo: is the Repo
:param rev: revision specifier, see git-rev-parse for viable options
:param paths:
is an optional path or list of paths, if set only Commits that include the path
or paths will be considered
:param kwargs:
optional keyword arguments to git rev-list where
``max_count`` is the maximum number of commits to fetch
``skip`` is the number of commits to skip
``since`` all commits since i.e. '1970-01-01'
:return: iterator yielding Commit items"""
if 'pretty' in kwargs:
raise ValueError("--pretty cannot be used as parsing expects single sha's only")
# END handle pretty
# use -- in any case, to prevent possibility of ambiguous arguments
# see https://github.com/gitpython-developers/GitPython/issues/264
args_list: List[PathLike] = ['--']
if paths:
paths_tup: Tuple[PathLike, ...]
if isinstance(paths, (str, os.PathLike)):
paths_tup = (paths, )
else:
paths_tup = tuple(paths)
args_list.extend(paths_tup)
# END if paths
proc = repo.git.rev_list(rev, args_list, as_process=True, **kwargs)
return cls._iter_from_process_or_stream(repo, proc)
def iter_parents(self, paths: Union[PathLike, Sequence[PathLike]] = '', **kwargs: Any) -> Iterator['Commit']:
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self """
# skip ourselves
skip = kwargs.get("skip", 1)
if skip == 0: # skip ourselves
skip = 1
kwargs['skip'] = skip
return self.iter_items(self.repo, self, paths, **kwargs)
@property
def stats(self) -> Stats:
"""Create a git stat from changes between this commit and its first parent
or from all changes done if this is the very first commit.
:return: git.Stats"""
if not self.parents:
text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
text2 = ""
for line in text.splitlines()[1:]:
(insertions, deletions, filename) = line.split("\t")
text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
text = text2
else:
text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
return Stats._list_from_string(self.repo, text)
@property
def trailers(self) -> Dict:
"""Get the trailers of the message as dictionary
Git messages can contain trailer information that is similar to RFC 822
e-mail headers (see: https://git-scm.com/docs/git-interpret-trailers).
This function calls ``git interpret-trailers --parse`` on the message
to extract the trailer information. The key-value pairs are stripped of
leading and trailing whitespace before they get saved into a dictionary.
Valid message with trailer:
.. code-block::
Subject line
some body information
another information
key1: value1
key2 : value 2 with inner spaces
The resulting dictionary will look like this:
.. code-block::
{
"key1": "value1",
"key2": "value 2 with inner spaces"
}
:return: Dictionary containing whitespace stripped trailer information
"""
d = {}
cmd = ['git', 'interpret-trailers', '--parse']
proc: Git.AutoInterrupt = self.repo.git.execute(cmd, as_process=True, istream=PIPE) # type: ignore
trailer: str = proc.communicate(str(self.message).encode())[0].decode()
if trailer.endswith('\n'):
trailer = trailer[0:-1]
if trailer != '':
for line in trailer.split('\n'):
key, value = line.split(':', 1)
d[key.strip()] = value.strip()
return d
@classmethod
def _iter_from_process_or_stream(cls, repo: 'Repo', proc_or_stream: Union[Popen, IO]) -> Iterator['Commit']:
"""Parse out commit information into a list of Commit objects
We expect one line per commit, and parse the actual commit information directly
from our lightning-fast object database
:param proc_or_stream: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects"""
# def is_proc(inp) -> TypeGuard[Popen]:
# return hasattr(proc_or_stream, 'wait') and not hasattr(proc_or_stream, 'readline')
# def is_stream(inp) -> TypeGuard[IO]:
# return hasattr(proc_or_stream, 'readline')
if hasattr(proc_or_stream, 'wait'):
proc_or_stream = cast(Popen, proc_or_stream)
if proc_or_stream.stdout is not None:
stream = proc_or_stream.stdout
elif hasattr(proc_or_stream, 'readline'):
proc_or_stream = cast(IO, proc_or_stream)
stream = proc_or_stream
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
# split additional information, as returned by bisect for instance
hexsha, _ = line.split(None, 1)
# END handle extra info
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield cls(repo, hex_to_bin(hexsha))
# END for each line in stream
# TODO: Review this - it seems process handling got a bit out of control
# due to many developers trying to fix the open file handles issue
if hasattr(proc_or_stream, 'wait'):
proc_or_stream = cast(Popen, proc_or_stream)
finalize_process(proc_or_stream)
@classmethod
def create_from_tree(cls, repo: 'Repo', tree: Union[Tree, str], message: str,
parent_commits: Union[None, List['Commit']] = None, head: bool = False,
author: Union[None, Actor] = None, committer: Union[None, Actor] = None,
author_date: Union[None, str] = None, commit_date: Union[None, str] = None) -> 'Commit':
"""Commit the given tree, creating a commit object.
:param repo: Repo object the commit should be part of
:param tree: Tree object or hex or bin sha
the tree of the new commit
:param message: Commit message. It may be an empty string if no message is provided.
It will be converted to a string, in any case.
:param parent_commits:
Optional Commit objects to use as parents for the new commit.
If empty list, the commit will have no parents at all and become
a root commit.
If None, the current head commit will be the parent of the
new commit object
:param head:
If True, the HEAD will be advanced to the new commit automatically.
Else the HEAD will remain pointing on the previous commit. This could
lead to undesired results when diffing files.
:param author: The name of the author, optional. If unset, the repository
configuration is used to obtain this value.
:param committer: The name of the committer, optional. If unset, the
repository configuration is used to obtain this value.
:param author_date: The timestamp for the author field
:param commit_date: The timestamp for the committer field
:return: Commit object representing the new commit
:note:
Additional information about the committer and author is taken from the
environment or from the git configuration, see git-commit-tree for
more information"""
if parent_commits is None:
try:
parent_commits = [repo.head.commit]
except ValueError:
# empty repositories have no head commit
parent_commits = []
# END handle parent commits
else:
for p in parent_commits:
if not isinstance(p, cls):
raise ValueError(f"Parent commit '{p!r}' must be of type {cls}")
# end check parent commit types
# END if parent commits are unset
# retrieve all additional information, create a commit object, and
# serialize it
# Generally:
# * Environment variables override configuration values
# * Sensible defaults are set according to the git documentation
# COMMITTER AND AUTHOR INFO
cr = repo.config_reader()
env = os.environ
committer = committer or Actor.committer(cr)
author = author or Actor.author(cr)
# PARSE THE DATES
unix_time = int(time())
is_dst = daylight and localtime().tm_isdst > 0
offset = altzone if is_dst else timezone
author_date_str = env.get(cls.env_author_date, '')
if author_date:
author_time, author_offset = parse_date(author_date)
elif author_date_str:
author_time, author_offset = parse_date(author_date_str)
else:
author_time, author_offset = unix_time, offset
# END set author time
committer_date_str = env.get(cls.env_committer_date, '')
if commit_date:
committer_time, committer_offset = parse_date(commit_date)
elif committer_date_str:
committer_time, committer_offset = parse_date(committer_date_str)
else:
committer_time, committer_offset = unix_time, offset
# END set committer time
# assume utf8 encoding
enc_section, enc_option = cls.conf_encoding.split('.')
conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
if not isinstance(conf_encoding, str):
raise TypeError("conf_encoding could not be coerced to str")
# if the tree is not an object, make sure we create one - otherwise
# the created commit object is invalid
if isinstance(tree, str):
tree = repo.tree(tree)
# END tree conversion
# CREATE NEW COMMIT
new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
author, author_time, author_offset,
committer, committer_time, committer_offset,
message, parent_commits, conf_encoding)
new_commit.binsha = cls._calculate_sha_(repo, new_commit)
if head:
# need late import here, importing git at the very beginning throws
# as well ...
import git.refs
try:
repo.head.set_commit(new_commit, logmsg=message)
except ValueError:
# head is not yet set to the ref our HEAD points to
# Happens on first commit
master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
# END handle empty repositories
# END advance head handling
return new_commit
#{ Serializable Implementation
def _serialize(self, stream: BytesIO) -> 'Commit':
write = stream.write
write(("tree %s\n" % self.tree).encode('ascii'))
for p in self.parents:
write(("parent %s\n" % p).encode('ascii'))
a = self.author
aname = a.name
c = self.committer
fmt = "%s %s <%s> %s %s\n"
write((fmt % ("author", aname, a.email,
self.authored_date,
altz_to_utctz_str(self.author_tz_offset))).encode(self.encoding))
# encode committer
aname = c.name
write((fmt % ("committer", aname, c.email,
self.committed_date,
altz_to_utctz_str(self.committer_tz_offset))).encode(self.encoding))
if self.encoding != self.default_encoding:
write(("encoding %s\n" % self.encoding).encode('ascii'))
try:
if self.__getattribute__('gpgsig'):
write(b"gpgsig")
for sigline in self.gpgsig.rstrip("\n").split("\n"):
write((" " + sigline + "\n").encode('ascii'))
except AttributeError:
pass
write(b"\n")
# write plain bytes, making sure they are encoded according to our encoding
if isinstance(self.message, str):
write(self.message.encode(self.encoding))
else:
write(self.message)
# END handle encoding
return self
def _deserialize(self, stream: BytesIO) -> 'Commit':
"""
:param stream: plain data stream from our object database holding the
raw commit object data (not the rev-list output format)
"""
readline = stream.readline
self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')
self.parents = []
next_line = None
while True:
parent_line = readline()
if not parent_line.startswith(b'parent'):
next_line = parent_line
break
# END abort reading parents
self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode('ascii'))))
# END for each parent line
self.parents = tuple(self.parents)
# we don't know actual author encoding before we have parsed it, so keep the lines around
author_line = next_line
committer_line = readline()
# we might run into one or more mergetag blocks, skip those for now
next_line = readline()
while next_line.startswith(b'mergetag '):
next_line = readline()
while next_line.startswith(b' '):
next_line = readline()
# end skip mergetags
# now we can have the encoding line, or an empty line followed by the optional
# message.
self.encoding = self.default_encoding
self.gpgsig = ""
# read headers
enc = next_line
buf = enc.strip()
while buf:
if buf[0:10] == b"encoding ":
self.encoding = buf[buf.find(b' ') + 1:].decode(
self.encoding, 'ignore')
elif buf[0:7] == b"gpgsig ":
sig = buf[buf.find(b' ') + 1:] + b"\n"
is_next_header = False
while True:
sigbuf = readline()
if not sigbuf:
break
if sigbuf[0:1] != b" ":
buf = sigbuf.strip()
is_next_header = True
break
sig += sigbuf[1:]
# end read all signature
self.gpgsig = sig.rstrip(b"\n").decode(self.encoding, 'ignore')
if is_next_header:
continue
buf = readline().strip()
# decode the authors name
try:
(self.author, self.authored_date, self.author_tz_offset) = \
parse_actor_and_date(author_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding,
exc_info=True)
try:
self.committer, self.committed_date, self.committer_tz_offset = \
parse_actor_and_date(committer_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding,
exc_info=True)
# END handle author's encoding
# a stream from our data simply gives us the plain message
# The end of our message stream is marked with a newline that we strip
self.message = stream.read()
try:
self.message = self.message.decode(self.encoding, 'replace')
except UnicodeDecodeError:
log.error("Failed to decode message '%s' using encoding %s",
self.message, self.encoding, exc_info=True)
# END exception handling
return self
#} END serializable implementation
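Taken together, a hedged sketch of how the Commit API above is typically driven; the repository path, revision and paths are hypothetical:

from git import Repo

repo = Repo('/path/to/repo')

# iterate commits reachable from 'main' that touch a path (rev-list based)
for commit in repo.iter_commits('main', paths='docs', max_count=10):
    print(commit.hexsha[:8], commit.summary)

head = repo.head.commit
print(head.count())              # number of commits reachable from HEAD
print(head.authored_datetime)    # tz-aware datetime built from authored_date
print(head.stats.total)          # insertions/deletions vs. the first parent
print(head.trailers)             # e.g. {'Signed-off-by': 'A U Thor <a@b.c>'}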


@@ -0,0 +1,237 @@
"""Module with functions which are supposed to be as fast as possible"""
from stat import S_ISDIR
from git.compat import (
safe_decode,
defenc
)
# typing ----------------------------------------------
from typing import Callable, List, MutableSequence, Sequence, Tuple, TYPE_CHECKING, Union, overload
if TYPE_CHECKING:
from _typeshed import ReadableBuffer
from git import GitCmdObjectDB
EntryTup = Tuple[bytes, int, str] # same as TreeCacheTup in tree.py
EntryTupOrNone = Union[EntryTup, None]
# ---------------------------------------------------
__all__ = ('tree_to_stream', 'tree_entries_from_data', 'traverse_trees_recursive',
'traverse_tree_recursive')
def tree_to_stream(entries: Sequence[EntryTup], write: Callable[['ReadableBuffer'], Union[int, None]]) -> None:
"""Write the give list of entries into a stream using its write method
:param entries: **sorted** list of tuples with (binsha, mode, name)
:param write: write method which takes a data string"""
ord_zero = ord('0')
bit_mask = 7 # 3 bits set
for binsha, mode, name in entries:
mode_str = b''
for i in range(6):
mode_str = bytes([((mode >> (i * 3)) & bit_mask) + ord_zero]) + mode_str
# END for each of the 6 octal digits
# git slices away the first octal digit if it's zero
if mode_str[0] == ord_zero:
mode_str = mode_str[1:]
# END save a byte
# here it comes: if the name is actually unicode, the replacement below
# will not work as the binsha is not part of the ascii unicode encoding -
# hence we must convert to a utf8 string for it to work properly.
# According to my tests, this is exactly what git does, that is, it just
# takes the input literally, which appears to be utf8 on linux.
if isinstance(name, str):
name_bytes = name.encode(defenc)
else:
name_bytes = name # type: ignore[unreachable] # check runtime types - is always str?
write(b''.join((mode_str, b' ', name_bytes, b'\0', binsha)))
# END for each item
def tree_entries_from_data(data: bytes) -> List[EntryTup]:
"""Reads the binary representation of a tree and returns tuples of Tree items
:param data: data block with tree data (as bytes)
:return: list(tuple(binsha, mode, tree_relative_path), ...)"""
ord_zero = ord('0')
space_ord = ord(' ')
len_data = len(data)
i = 0
out = []
while i < len_data:
mode = 0
# read mode
# Some git versions truncate the leading 0, some don't
# The type will be extracted from the mode later
while data[i] != space_ord:
# shift the existing mode integer up by one octal digit (3 bits)
# and add the numeric value of the current digit character
mode = (mode << 3) + (data[i] - ord_zero)
i += 1
# END while reading mode
# byte is space now, skip it
i += 1
# parse name, it is NULL separated
ns = i
while data[i] != 0:
i += 1
# END while not reached NULL
# default encoding for strings in git is utf8
# Only use the respective unicode object if the byte stream was encoded
name_bytes = data[ns:i]
name = safe_decode(name_bytes)
# byte is NULL, get next 20
i += 1
sha = data[i:i + 20]
i = i + 20
out.append((sha, mode, name))
# END for each byte in data stream
return out
def _find_by_name(tree_data: MutableSequence[EntryTupOrNone], name: str, is_dir: bool, start_at: int
) -> EntryTupOrNone:
"""return data entry matching the given name and tree mode
or None.
Before the item is returned, the respective data item is set to
None in the tree_data list to mark it done"""
try:
item = tree_data[start_at]
if item and item[2] == name and S_ISDIR(item[1]) == is_dir:
tree_data[start_at] = None
return item
except IndexError:
pass
# END exception handling
for index, item in enumerate(tree_data):
if item and item[2] == name and S_ISDIR(item[1]) == is_dir:
tree_data[index] = None
return item
# END if item matches
# END for each item
return None
@overload
def _to_full_path(item: None, path_prefix: str) -> None:
...
@overload
def _to_full_path(item: EntryTup, path_prefix: str) -> EntryTup:
...
def _to_full_path(item: EntryTupOrNone, path_prefix: str) -> EntryTupOrNone:
"""Rebuild entry with given path prefix"""
if not item:
return item
return (item[0], item[1], path_prefix + item[2])
def traverse_trees_recursive(odb: 'GitCmdObjectDB', tree_shas: Sequence[Union[bytes, None]],
path_prefix: str) -> List[Tuple[EntryTupOrNone, ...]]:
"""
:return: list of list with entries according to the given binary tree-shas.
The result is encoded in a list
of n tuple|None per blob/commit, (n == len(tree_shas)), where
* [0] == 20 byte sha
* [1] == mode as int
* [2] == path relative to working tree root
The entry tuple is None if the respective blob/commit did not
exist in the given tree.
:param tree_shas: iterable of shas pointing to trees. All trees must
be on the same level. A tree-sha may be None, in which case None entries will be returned for it
:param path_prefix: a prefix to be added to the returned paths on this level,
set it '' for the first iteration
:note: The ordering of the returned items will be partially lost"""
trees_data: List[List[EntryTupOrNone]] = []
nt = len(tree_shas)
for tree_sha in tree_shas:
if tree_sha is None:
data: List[EntryTupOrNone] = []
else:
# make new list for typing as list invariant
data = list(tree_entries_from_data(odb.stream(tree_sha).read()))
# END handle muted trees
trees_data.append(data)
# END for each sha to get data for
out: List[Tuple[EntryTupOrNone, ...]] = []
# find all matching entries and recursively process them together if the match
# is a tree. If the match is a non-tree item, put it into the result.
# Processed items will be set None
for ti, tree_data in enumerate(trees_data):
for ii, item in enumerate(tree_data):
if not item:
continue
# END skip already done items
entries: List[EntryTupOrNone]
entries = [None for _ in range(nt)]
entries[ti] = item
_sha, mode, name = item
is_dir = S_ISDIR(mode) # type mode bits
# find this item in all other tree data items
# wrap around, but stop one before our current index, hence
# ti+nt, not ti+1+nt
for tio in range(ti + 1, ti + nt):
tio = tio % nt
entries[tio] = _find_by_name(trees_data[tio], name, is_dir, ii)
# END for each other item data
# if we are a directory, enter recursion
if is_dir:
out.extend(traverse_trees_recursive(
odb, [((ei and ei[0]) or None) for ei in entries], path_prefix + name + '/'))
else:
out.append(tuple(_to_full_path(e, path_prefix) for e in entries))
# END handle recursion
# finally mark it done
tree_data[ii] = None
# END for each item
# we are done with one tree, set all its data empty
del tree_data[:]
# END for each tree_data chunk
return out
def traverse_tree_recursive(odb: 'GitCmdObjectDB', tree_sha: bytes, path_prefix: str) -> List[EntryTup]:
"""
:return: list of entries of the tree pointed to by the binary tree_sha. An entry
has the following format:
* [0] 20 byte sha
* [1] mode as int
* [2] path relative to the repository
:param path_prefix: prefix to prepend to the front of all returned paths"""
entries = []
data = tree_entries_from_data(odb.stream(tree_sha).read())
# unpacking/packing is faster than accessing individual items
for sha, mode, name in data:
if S_ISDIR(mode):
entries.extend(traverse_tree_recursive(odb, sha, path_prefix + name + '/'))
else:
entries.append((sha, mode, path_prefix + name))
# END for each item
return entries
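The two helpers above invert each other, which the following self-contained round trip demonstrates (the dummy sha and name are chosen purely for illustration):

from io import BytesIO
from git.objects.fun import tree_entries_from_data, tree_to_stream

binsha = b'\x01' * 20                       # dummy 20-byte sha
entries = [(binsha, 0o100644, 'README')]    # (binsha, mode, name), sorted

buf = BytesIO()
tree_to_stream(entries, buf.write)          # writes b'100644 README\x00' + sha

assert tree_entries_from_data(buf.getvalue()) == entries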


@@ -0,0 +1,2 @@
# NOTE: Cannot import anything here as the top-level _init_ has to handle
# our dependencies

File diff suppressed because it is too large


@@ -0,0 +1,362 @@
from .base import (
Submodule,
UpdateProgress
)
from .util import find_first_remote_branch
from git.exc import InvalidGitRepositoryError
import git
import logging
# typing -------------------------------------------------------------------
from typing import TYPE_CHECKING, Union
from git.types import Commit_ish
if TYPE_CHECKING:
from git.repo import Repo
from git.util import IterableList
# ----------------------------------------------------------------------------
__all__ = ["RootModule", "RootUpdateProgress"]
log = logging.getLogger('git.objects.submodule.root')
log.addHandler(logging.NullHandler())
class RootUpdateProgress(UpdateProgress):
"""Utility class which adds more opcodes to the UpdateProgress"""
REMOVE, PATHCHANGE, BRANCHCHANGE, URLCHANGE = [
1 << x for x in range(UpdateProgress._num_op_codes, UpdateProgress._num_op_codes + 4)]
_num_op_codes = UpdateProgress._num_op_codes + 4
__slots__ = ()
BEGIN = RootUpdateProgress.BEGIN
END = RootUpdateProgress.END
REMOVE = RootUpdateProgress.REMOVE
BRANCHCHANGE = RootUpdateProgress.BRANCHCHANGE
URLCHANGE = RootUpdateProgress.URLCHANGE
PATHCHANGE = RootUpdateProgress.PATHCHANGE
class RootModule(Submodule):
"""A (virtual) Root of all submodules in the given repository. It can be used
to more easily traverse all submodules of the master repository"""
__slots__ = ()
k_root_name = '__ROOT__'
def __init__(self, repo: 'Repo'):
# repo, binsha, mode=None, path=None, name = None, parent_commit=None, url=None, ref=None)
super(RootModule, self).__init__(
repo,
binsha=self.NULL_BIN_SHA,
mode=self.k_default_mode,
path='',
name=self.k_root_name,
parent_commit=repo.head.commit,
url='',
branch_path=git.Head.to_full_path(self.k_head_default)
)
def _clear_cache(self) -> None:
"""May not do anything"""
pass
#{ Interface
def update(self, previous_commit: Union[Commit_ish, None] = None, # type: ignore[override]
recursive: bool = True, force_remove: bool = False, init: bool = True,
to_latest_revision: bool = False, progress: Union[None, 'RootUpdateProgress'] = None,
dry_run: bool = False, force_reset: bool = False, keep_going: bool = False
) -> 'RootModule':
"""Update the submodules of this repository to the current HEAD commit.
This method behaves smartly by determining changes of the path of a submodule's
repository, next to changes to the to-be-checked-out commit or the branch to be
checked out. This works if the submodule's ID does not change.
Additionally it will detect addition and removal of submodules, which will be handled
gracefully.
:param previous_commit: If set to a commit'ish, the commit we should use
as the previous commit the HEAD pointed to before it was set to the commit it points to now.
If None, it defaults to HEAD@{1}
:param recursive: if True, the children of submodules will be updated as well
using the same technique
:param force_remove: If submodules have been deleted, they will be forcibly removed.
Otherwise the update may fail if a submodule's repository cannot be deleted as
changes have been made to it (see Submodule.update() for more information)
:param init: If we encounter a new module which would need to be initialized, then do it.
:param to_latest_revision: If True, instead of checking out the revision pointed to
by this submodule's sha, the checked out tracking branch will be merged with the
latest remote branch fetched from the repository's origin.
Unless force_reset is specified, a local tracking branch will never be reset into its past, therefore
the remote branch must be in the future for this to have an effect.
:param force_reset: if True, submodules may checkout or reset their branch even if the repository has
pending changes that would be overwritten, or if the local tracking branch is in the future of the
remote tracking branch and would be reset into its past.
:param progress: RootUpdateProgress instance or None if no progress should be sent
:param dry_run: if True, operations will not actually be performed. Progress messages
will change accordingly to indicate the WOULD DO state of the operation.
:param keep_going: if True, we will ignore but log all errors, and keep going recursively.
Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see
otherwise.
In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules
:return: self"""
if self.repo.bare:
raise InvalidGitRepositoryError("Cannot update submodules in bare repositories")
# END handle bare
if progress is None:
progress = RootUpdateProgress()
# END assure progress is set
prefix = ''
if dry_run:
prefix = 'DRY-RUN: '
repo = self.repo
try:
# SETUP BASE COMMIT
###################
cur_commit = repo.head.commit
if previous_commit is None:
try:
previous_commit = repo.commit(repo.head.log_entry(-1).oldhexsha)
if previous_commit.binsha == previous_commit.NULL_BIN_SHA:
raise IndexError
# END handle initial commit
except IndexError:
# in new repositories, there is no previous commit
previous_commit = cur_commit
# END exception handling
else:
previous_commit = repo.commit(previous_commit) # obtain commit object
# END handle previous commit
psms: 'IterableList[Submodule]' = self.list_items(repo, parent_commit=previous_commit)
sms: 'IterableList[Submodule]' = self.list_items(repo)
spsms = set(psms)
ssms = set(sms)
# HANDLE REMOVALS
###################
rrsm = (spsms - ssms)
len_rrsm = len(rrsm)
for i, rsm in enumerate(rrsm):
op = REMOVE
if i == 0:
op |= BEGIN
# END handle begin
# fake it into thinking it's at the current commit to allow deletion
# of previous module. Trigger the cache to be updated before that
progress.update(op, i, len_rrsm, prefix + "Removing submodule %r at %s" % (rsm.name, rsm.abspath))
rsm._parent_commit = repo.head.commit
rsm.remove(configuration=False, module=True, force=force_remove, dry_run=dry_run)
if i == len_rrsm - 1:
op |= END
# END handle end
progress.update(op, i, len_rrsm, prefix + "Done removing submodule %r" % rsm.name)
# END for each removed submodule
# HANDLE PATH RENAMES
#####################
# url changes + branch changes
csms = (spsms & ssms)
len_csms = len(csms)
for i, csm in enumerate(csms):
psm: 'Submodule' = psms[csm.name]
sm: 'Submodule' = sms[csm.name]
# PATH CHANGES
##############
if sm.path != psm.path and psm.module_exists():
progress.update(BEGIN | PATHCHANGE, i, len_csms, prefix +
"Moving repository of submodule %r from %s to %s"
% (sm.name, psm.abspath, sm.abspath))
# move the module to the new path
if not dry_run:
psm.move(sm.path, module=True, configuration=False)
# END handle dry_run
progress.update(
END | PATHCHANGE, i, len_csms, prefix + "Done moving repository of submodule %r" % sm.name)
# END handle path changes
if sm.module_exists():
# HANDLE URL CHANGE
###################
if sm.url != psm.url:
# Add the new remote, remove the old one
# This way, if the url just changes, the commits will not
# have to be re-retrieved
nn = '__new_origin__'
smm = sm.module()
rmts = smm.remotes
# don't do anything if the url we are looking for is already in place
if len([r for r in rmts if r.url == sm.url]) == 0:
progress.update(BEGIN | URLCHANGE, i, len_csms, prefix +
"Changing url of submodule %r from %s to %s" % (sm.name, psm.url, sm.url))
if not dry_run:
assert nn not in [r.name for r in rmts]
smr = smm.create_remote(nn, sm.url)
smr.fetch(progress=progress)
# If we have a tracking branch, it should be available
# in the new remote as well.
if len([r for r in smr.refs if r.remote_head == sm.branch_name]) == 0:
raise ValueError(
"Submodule branch named %r was not available in new submodule remote at %r"
% (sm.branch_name, sm.url)
)
# END head is not detached
# now delete the changed one
rmt_for_deletion = None
for remote in rmts:
if remote.url == psm.url:
rmt_for_deletion = remote
break
# END if urls match
# END for each remote
# if we didn't find a matching remote, but have exactly one,
# we can safely use this one
if rmt_for_deletion is None:
if len(rmts) == 1:
rmt_for_deletion = rmts[0]
else:
# if we have not found any remote with the original url
# we may not have a name. This is a special case,
# and it's okay to fail here
# Alternatively we could just generate a unique name and leave all
# existing ones in place
raise InvalidGitRepositoryError(
"Couldn't find original remote-repo at url %r" % psm.url)
# END handle one single remote
# END handle check we found a remote
orig_name = rmt_for_deletion.name
smm.delete_remote(rmt_for_deletion)
# NOTE: Currently we leave tags from the deleted remotes
# as well as separate tracking branches in the possibly totally
# changed repository ( someone could have changed the url to
# another project ). At some point, one might want to clean
# it up, but the danger is high to remove stuff the user
# has added explicitly
# rename the new remote back to what it was
smr.rename(orig_name)
# early on, we verified that our current tracking branch
# exists in the remote. Now we have to assure that the
# sha we point to is still contained in the new remote
# tracking branch.
smsha = sm.binsha
found = False
rref = smr.refs[self.branch_name]
for c in rref.commit.traverse():
if c.binsha == smsha:
found = True
break
# END traverse all commits in search for sha
# END for each commit
if not found:
# adjust our internal binsha to use the one of the remote
# this way, it will be checked out in the next step
# This will change the submodule relative to us, so
# the user will be able to commit the change easily
log.warning("Current sha %s was not contained in the tracking\
branch at the new remote, setting it to the remote's tracking branch", sm.hexsha)
sm.binsha = rref.commit.binsha
# END reset binsha
# NOTE: All checkout is performed by the base implementation of update
# END handle dry_run
progress.update(
END | URLCHANGE, i, len_csms, prefix + "Done adjusting url of submodule %r" % (sm.name))
# END skip remote handling if new url already exists in module
# END handle url
# HANDLE PATH CHANGES
#####################
if sm.branch_path != psm.branch_path:
# finally, create a new tracking branch which tracks the
# new remote branch
progress.update(BEGIN | BRANCHCHANGE, i, len_csms, prefix +
"Changing branch of submodule %r from %s to %s"
% (sm.name, psm.branch_path, sm.branch_path))
if not dry_run:
smm = sm.module()
smmr = smm.remotes
# As the branch might not exist yet, we will have to fetch all remotes to be sure ...
for remote in smmr:
remote.fetch(progress=progress)
# end for each remote
try:
tbr = git.Head.create(smm, sm.branch_name, logmsg='branch: Created from HEAD')
except OSError:
# ... or reuse the existing one
tbr = git.Head(smm, sm.branch_path)
# END assure tracking branch exists
tbr.set_tracking_branch(find_first_remote_branch(smmr, sm.branch_name))
# NOTE: All head-resetting is done in the base implementation of update
# but we will have to checkout the new branch here. As it still points to the currently
# checked-out commit, we don't do any harm.
# As we don't want to update working-tree or index, changing the ref is all there is to do
smm.head.reference = tbr
# END handle dry_run
progress.update(
END | BRANCHCHANGE, i, len_csms, prefix + "Done changing branch of submodule %r" % sm.name)
# END handle branch
# END handle
# END for each common submodule
except Exception as err:
if not keep_going:
raise
log.error(str(err))
# end handle keep_going
# FINALLY UPDATE ALL ACTUAL SUBMODULES
######################################
for sm in sms:
# update the submodule using the default method
sm.update(recursive=False, init=init, to_latest_revision=to_latest_revision,
progress=progress, dry_run=dry_run, force=force_reset, keep_going=keep_going)
# update recursively depth first - question is which inconsistent
# state will be better in case it fails somewhere. Defective branch
# or defective depth. The RootSubmodule type will never process itself,
# which was done in the previous expression
if recursive:
# the module would exist by now if we are not in dry_run mode
if sm.module_exists():
type(self)(sm.module()).update(recursive=True, force_remove=force_remove,
init=init, to_latest_revision=to_latest_revision,
progress=progress, dry_run=dry_run, force_reset=force_reset,
keep_going=keep_going)
# END handle dry_run
# END handle recursive
# END for each submodule to update
return self
def module(self) -> 'Repo':
""":return: the actual repository containing the submodules"""
return self.repo
#} END interface
#} END classes
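A hedged sketch of driving RootModule.update with a custom progress handler; the superproject path is hypothetical:

from git import Repo
from git.objects.submodule.root import RootModule, RootUpdateProgress

class LogProgress(RootUpdateProgress):
    def update(self, op_code, cur_count, max_count=None, message=''):
        print(message)

repo = Repo('/path/to/superproject')    # hypothetical repository
# preview what would happen to all submodules, recursively, without
# touching anything (dry run)
RootModule(repo).update(recursive=True, init=True,
                        progress=LogProgress(), dry_run=True)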


@@ -0,0 +1,110 @@
import git
from git.exc import InvalidGitRepositoryError
from git.config import GitConfigParser
from io import BytesIO
import weakref
# typing -----------------------------------------------------------------------
from typing import Any, Sequence, TYPE_CHECKING, Union
from git.types import PathLike
if TYPE_CHECKING:
from .base import Submodule
from weakref import ReferenceType
from git.repo import Repo
from git.refs import Head
from git import Remote
from git.refs import RemoteReference
__all__ = ('sm_section', 'sm_name', 'mkhead', 'find_first_remote_branch',
'SubmoduleConfigParser')
#{ Utilities
def sm_section(name: str) -> str:
""":return: section title used in .gitmodules configuration file"""
return f'submodule "{name}"'
def sm_name(section: str) -> str:
""":return: name of the submodule as parsed from the section name"""
section = section.strip()
return section[11:-1]
def mkhead(repo: 'Repo', path: PathLike) -> 'Head':
""":return: New branch/head instance"""
return git.Head(repo, git.Head.to_full_path(path))
def find_first_remote_branch(remotes: Sequence['Remote'], branch_name: str) -> 'RemoteReference':
"""Find the remote branch matching the name of the given branch or raise InvalidGitRepositoryError"""
for remote in remotes:
try:
return remote.refs[branch_name]
except IndexError:
continue
# END exception handling
# END for remote
raise InvalidGitRepositoryError("Didn't find remote branch '%r' in any of the given remotes" % branch_name)
#} END utilities
#{ Classes
class SubmoduleConfigParser(GitConfigParser):
"""
Catches calls to _write, and updates the .gitmodules blob in the index
with the new data, if we have written into a stream. Otherwise it will
add the local file to the index to make it correspond with the working tree.
Additionally, the cache must be cleared
Please note that no mutating method will work in bare mode
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._smref: Union['ReferenceType[Submodule]', None] = None
self._index = None
self._auto_write = True
super(SubmoduleConfigParser, self).__init__(*args, **kwargs)
#{ Interface
def set_submodule(self, submodule: 'Submodule') -> None:
"""Set this instance's submodule. It must be called before
the first write operation begins"""
self._smref = weakref.ref(submodule)
def flush_to_index(self) -> None:
"""Flush changes in our configuration file to the index"""
assert self._smref is not None
# should always have a file here
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index
# END handle index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache()
# END handle weakref
#} END interface
#{ Overridden Methods
def write(self) -> None: # type: ignore[override]
rval: None = super(SubmoduleConfigParser, self).write()
self.flush_to_index()
return rval
# END overridden methods
#} END classes
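sm_section and sm_name invert each other, as a quick sketch shows:

from git.objects.submodule.util import sm_section, sm_name

section = sm_section('vendor/lib')        # -> 'submodule "vendor/lib"'
assert sm_name(section) == 'vendor/lib'   # parses the name back out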


@@ -0,0 +1,93 @@
# objects.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
""" Module containing all object based types. """
from . import base
from .util import get_object_type_by_name, parse_actor_and_date
from ..util import hex_to_bin
from ..compat import defenc
from typing import List, TYPE_CHECKING, Union
from git.types import Literal
if TYPE_CHECKING:
from git.repo import Repo
from git.util import Actor
from .commit import Commit
from .blob import Blob
from .tree import Tree
__all__ = ("TagObject", )
class TagObject(base.Object):
"""Non-Lightweight tag carrying additional information about an object we are pointing to."""
type: Literal['tag'] = "tag"
__slots__ = ("object", "tag", "tagger", "tagged_date", "tagger_tz_offset", "message")
def __init__(self, repo: 'Repo', binsha: bytes,
object: Union[None, base.Object] = None,
tag: Union[None, str] = None,
tagger: Union[None, 'Actor'] = None,
tagged_date: Union[int, None] = None,
tagger_tz_offset: Union[int, None] = None,
message: Union[str, None] = None
) -> None: # @ReservedAssignment
"""Initialize a tag object with additional data
:param repo: repository this object is located in
:param binsha: 20 byte SHA1
:param object: Object instance of object we are pointing to
:param tag: name of this tag
:param tagger: Actor identifying the tagger
:param tagged_date: int_seconds_since_epoch
is the DateTime of the tag creation - use time.gmtime to convert
it into a different format
:param tagger_tz_offset: int_seconds_west_of_utc is the timezone that the
tagged_date is in, in a format similar to time.altzone"""
super(TagObject, self).__init__(repo, binsha)
if object is not None:
self.object: Union['Commit', 'Blob', 'Tree', 'TagObject'] = object
if tag is not None:
self.tag = tag
if tagger is not None:
self.tagger = tagger
if tagged_date is not None:
self.tagged_date = tagged_date
if tagger_tz_offset is not None:
self.tagger_tz_offset = tagger_tz_offset
if message is not None:
self.message = message
def _set_cache_(self, attr: str) -> None:
"""Cache all our attributes at once"""
if attr in TagObject.__slots__:
ostream = self.repo.odb.stream(self.binsha)
lines: List[str] = ostream.read().decode(defenc, 'replace').splitlines()
_obj, hexsha = lines[0].split(" ")
_type_token, type_name = lines[1].split(" ")
object_type = get_object_type_by_name(type_name.encode('ascii'))
self.object = \
object_type(self.repo, hex_to_bin(hexsha))
self.tag = lines[2][4:] # tag <tag name>
if len(lines) > 3:
tagger_info = lines[3] # tagger <actor> <date>
self.tagger, self.tagged_date, self.tagger_tz_offset = parse_actor_and_date(tagger_info)
# line 4 empty - it could mark the beginning of the next header
# in case there really is no message, it would not exist. Otherwise
# a newline separates header from message
if len(lines) > 5:
self.message = "\n".join(lines[5:])
else:
self.message = ''
# END check our attributes
else:
super(TagObject, self)._set_cache_(attr)
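A hedged sketch of reading an annotated tag through this class; the repository path and tag name are hypothetical:

from git import Repo

repo = Repo('/path/to/repo')
tag_ref = repo.tags['v1.0.0']      # reference; .tag is None for lightweight tags
tag_obj = tag_ref.tag
if tag_obj is not None:            # annotated tag -> a TagObject as above
    print(tag_obj.tag)                           # tag name stored in the object
    print(tag_obj.tagger, tag_obj.tagged_date)   # parsed from the tagger line
    print(tag_obj.message)
    print(tag_obj.object.hexsha)                 # the tagged commit (or object)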


@@ -0,0 +1,398 @@
# tree.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.util import IterableList, join_path
import git.diff as git_diff
from git.util import to_bin_sha
from . import util
from .base import IndexObject, IndexObjUnion
from .blob import Blob
from .submodule.base import Submodule
from .fun import (
tree_entries_from_data,
tree_to_stream
)
# typing -------------------------------------------------
from typing import (Any, Callable, Dict, Iterable, Iterator, List,
Tuple, Type, Union, cast, TYPE_CHECKING)
from git.types import PathLike, Literal
if TYPE_CHECKING:
from git.repo import Repo
from io import BytesIO
TreeCacheTup = Tuple[bytes, int, str]
TraversedTreeTup = Union[Tuple[Union['Tree', None], IndexObjUnion,
Tuple['Submodule', 'Submodule']]]
# def is_tree_cache(inp: Tuple[bytes, int, str]) -> TypeGuard[TreeCacheTup]:
# return isinstance(inp[0], bytes) and isinstance(inp[1], int) and isinstance([inp], str)
#--------------------------------------------------------
cmp: Callable[[str, str], int] = lambda a, b: (a > b) - (a < b)
__all__ = ("TreeModifier", "Tree")
def git_cmp(t1: TreeCacheTup, t2: TreeCacheTup) -> int:
a, b = t1[2], t2[2]
# assert isinstance(a, str) and isinstance(b, str)
len_a, len_b = len(a), len(b)
min_len = min(len_a, len_b)
min_cmp = cmp(a[:min_len], b[:min_len])
if min_cmp:
return min_cmp
return len_a - len_b
def merge_sort(a: List[TreeCacheTup],
cmp: Callable[[TreeCacheTup, TreeCacheTup], int]) -> None:
if len(a) < 2:
return None
mid = len(a) // 2
lefthalf = a[:mid]
righthalf = a[mid:]
merge_sort(lefthalf, cmp)
merge_sort(righthalf, cmp)
i = 0
j = 0
k = 0
while i < len(lefthalf) and j < len(righthalf):
if cmp(lefthalf[i], righthalf[j]) <= 0:
a[k] = lefthalf[i]
i = i + 1
else:
a[k] = righthalf[j]
j = j + 1
k = k + 1
while i < len(lefthalf):
a[k] = lefthalf[i]
i = i + 1
k = k + 1
while j < len(righthalf):
a[k] = righthalf[j]
j = j + 1
k = k + 1
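# Illustrative sketch (hypothetical helper, not part of the module): git
# orders entries by their shared prefix first and by length on a tie, which
# merge_sort together with git_cmp reproduces.
def _git_cmp_example() -> None:
    entries = [(b'\x00' * 20, 0o100644, name) for name in ('b', 'a.txt', 'a')]
    merge_sort(entries, git_cmp)
    assert [e[2] for e in entries] == ['a', 'a.txt', 'b']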
class TreeModifier(object):
"""A utility class providing methods to alter the underlying cache in a list-like fashion.
Once all adjustments are complete, the _cache, which really is a reference to
the cache of a tree, will be sorted, assuring it will be in a serializable state"""
__slots__ = '_cache'
def __init__(self, cache: List[TreeCacheTup]) -> None:
self._cache = cache
def _index_by_name(self, name: str) -> int:
""":return: index of an item with name, or -1 if not found"""
for i, t in enumerate(self._cache):
if t[2] == name:
return i
# END found item
# END for each item in cache
return -1
#{ Interface
def set_done(self) -> 'TreeModifier':
"""Call this method once you are done modifying the tree information.
It may be called several times, but be aware that each call will cause
a sort operation
:return self:"""
merge_sort(self._cache, git_cmp)
return self
#} END interface
#{ Mutators
def add(self, sha: bytes, mode: int, name: str, force: bool = False) -> 'TreeModifier':
"""Add the given item to the tree. If an item with the given name already
exists, nothing will be done, but a ValueError will be raised if the
sha and mode of the existing item do not match the one you add, unless
force is True
:param sha: The 20 or 40 byte sha of the item to add
:param mode: int representing the stat compatible mode of the item
:param force: If True, an item with your name and information will overwrite
any existing item with the same name, no matter which information it has
:return: self"""
if '/' in name:
raise ValueError("Name must not contain '/' characters")
if (mode >> 12) not in Tree._map_id_to_type:
raise ValueError("Invalid object type according to mode %o" % mode)
sha = to_bin_sha(sha)
index = self._index_by_name(name)
item = (sha, mode, name)
# assert is_tree_cache(item)
if index == -1:
self._cache.append(item)
else:
if force:
self._cache[index] = item
else:
ex_item = self._cache[index]
if ex_item[0] != sha or ex_item[1] != mode:
raise ValueError("Item %r existed with different properties" % name)
# END handle mismatch
# END handle force
# END handle name exists
return self
def add_unchecked(self, binsha: bytes, mode: int, name: str) -> None:
"""Add the given item to the tree, its correctness is assumed, which
puts the caller into responsibility to assure the input is correct.
For more information on the parameters, see ``add``
:param binsha: 20 byte binary sha"""
assert isinstance(binsha, bytes) and isinstance(mode, int) and isinstance(name, str)
tree_cache = (binsha, mode, name)
self._cache.append(tree_cache)
def __delitem__(self, name: str) -> None:
"""Deletes an item with the given name if it exists"""
index = self._index_by_name(name)
if index > -1:
del(self._cache[index])
#} END mutators
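
# Illustrative sketch: mutating a bare cache list through TreeModifier. The
# shas and names are fabricated; set_done() restores git's canonical order.
def _demo_tree_modifier() -> List[str]:
    cache: List[TreeCacheTup] = []
    mod = TreeModifier(cache)
    mod.add(b'\x01' * 20, Tree.blob_id << 12, 'b.txt')
    mod.add(b'\x02' * 20, Tree.blob_id << 12, 'a.txt')
    mod.set_done()                               # sort into serializable state
    return [name for _, _, name in cache]        # ['a.txt', 'b.txt']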
class Tree(IndexObject, git_diff.Diffable, util.Traversable, util.Serializable):
"""Tree objects represent an ordered list of Blobs and other Trees.
``Tree as a list``::
Access a specific blob using the
tree['filename'] notation.
You may as well access by index
blob = tree[0]
"""
type: Literal['tree'] = "tree"
__slots__ = "_cache"
# actual integer ids for comparison
commit_id = 0o16 # equals stat.S_IFDIR | stat.S_IFLNK - a directory link
blob_id = 0o10
symlink_id = 0o12
tree_id = 0o04
_map_id_to_type: Dict[int, Type[IndexObjUnion]] = {
commit_id: Submodule,
blob_id: Blob,
symlink_id: Blob
# tree id added once Tree is defined
}
def __init__(self, repo: 'Repo', binsha: bytes, mode: int = tree_id << 12, path: Union[PathLike, None] = None):
super(Tree, self).__init__(repo, binsha, mode, path)
@classmethod
def _get_intermediate_items(cls, index_object: IndexObjUnion,
) -> Union[Tuple['Tree', ...], Tuple[()]]:
if index_object.type == "tree":
return tuple(index_object._iter_convert_to_object(index_object._cache))
return ()
def _set_cache_(self, attr: str) -> None:
if attr == "_cache":
# Set the data when we need it
ostream = self.repo.odb.stream(self.binsha)
self._cache: List[TreeCacheTup] = tree_entries_from_data(ostream.read())
else:
super(Tree, self)._set_cache_(attr)
# END handle attribute
def _iter_convert_to_object(self, iterable: Iterable[TreeCacheTup]
) -> Iterator[IndexObjUnion]:
"""Iterable yields tuples of (binsha, mode, name), which will be converted
to the respective object representation"""
for binsha, mode, name in iterable:
path = join_path(self.path, name)
try:
yield self._map_id_to_type[mode >> 12](self.repo, binsha, mode, path)
except KeyError as e:
raise TypeError("Unknown mode %o found in tree data for path '%s'" % (mode, path)) from e
# END for each item
def join(self, file: str) -> IndexObjUnion:
"""Find the named object in this tree's contents
:return: ``git.Blob`` or ``git.Tree`` or ``git.Submodule``
:raise KeyError: if given file or tree does not exist in tree"""
msg = "Blob or Tree named %r not found"
if '/' in file:
tree = self
item = self
tokens = file.split('/')
for i, token in enumerate(tokens):
item = tree[token]
if item.type == 'tree':
tree = item
else:
# safety assertion - blobs are at the end of the path
if i != len(tokens) - 1:
raise KeyError(msg % file)
return item
# END handle item type
# END for each token of split path
if item == self:
raise KeyError(msg % file)
return item
else:
for info in self._cache:
if info[2] == file: # [2] == name
return self._map_id_to_type[info[1] >> 12](self.repo, info[0], info[1],
join_path(self.path, info[2]))
# END for each obj
raise KeyError(msg % file)
# END handle long paths
def __truediv__(self, file: str) -> IndexObjUnion:
"""For PY3 only"""
return self.join(file)
@property
def trees(self) -> List['Tree']:
""":return: list(Tree, ...) list of trees directly below this tree"""
return [i for i in self if i.type == "tree"]
@property
def blobs(self) -> List[Blob]:
""":return: list(Blob, ...) list of blobs directly below this tree"""
return [i for i in self if i.type == "blob"]
@property
def cache(self) -> TreeModifier:
"""
:return: An object allowing to modify the internal cache. This can be used
to change the tree's contents. When done, make sure you call ``set_done``
on the tree modifier, or serialization behaviour will be incorrect.
See the ``TreeModifier`` for more information on how to alter the cache"""
return TreeModifier(self._cache)
def traverse(self, # type: ignore[override]
predicate: Callable[[Union[IndexObjUnion, TraversedTreeTup], int], bool] = lambda i, d: True,
prune: Callable[[Union[IndexObjUnion, TraversedTreeTup], int], bool] = lambda i, d: False,
depth: int = -1,
branch_first: bool = True,
visit_once: bool = False,
ignore_self: int = 1,
as_edge: bool = False
) -> Union[Iterator[IndexObjUnion],
Iterator[TraversedTreeTup]]:
"""For documentation, see util.Traversable._traverse()
Trees are set to visit_once = False to gain more performance in the traversal"""
# """
# # To typecheck instead of using cast.
# import itertools
# def is_tree_traversed(inp: Tuple) -> TypeGuard[Tuple[Iterator[Union['Tree', 'Blob', 'Submodule']]]]:
# return all(isinstance(x, (Blob, Tree, Submodule)) for x in inp[1])
# ret = super(Tree, self).traverse(predicate, prune, depth, branch_first, visit_once, ignore_self)
# ret_tup = itertools.tee(ret, 2)
# assert is_tree_traversed(ret_tup), f"Type is {[type(x) for x in list(ret_tup[0])]}"
# return ret_tup[0]"""
return cast(Union[Iterator[IndexObjUnion], Iterator[TraversedTreeTup]],
super(Tree, self)._traverse(predicate, prune, depth, # type: ignore
branch_first, visit_once, ignore_self))
def list_traverse(self, *args: Any, **kwargs: Any) -> IterableList[IndexObjUnion]:
"""
:return: IterableList with the results of the traversal as produced by
traverse()
Tree -> IterableList[Union['Submodule', 'Tree', 'Blob']]
"""
return super(Tree, self)._list_traverse(*args, **kwargs)
# List protocol
def __getslice__(self, i: int, j: int) -> List[IndexObjUnion]:
return list(self._iter_convert_to_object(self._cache[i:j]))
def __iter__(self) -> Iterator[IndexObjUnion]:
return self._iter_convert_to_object(self._cache)
def __len__(self) -> int:
return len(self._cache)
def __getitem__(self, item: Union[str, int, slice]) -> IndexObjUnion:
if isinstance(item, int):
info = self._cache[item]
return self._map_id_to_type[info[1] >> 12](self.repo, info[0], info[1], join_path(self.path, info[2]))
if isinstance(item, str):
# compatibility
return self.join(item)
# END index is basestring
raise TypeError("Invalid index type: %r" % item)
def __contains__(self, item: Union[IndexObjUnion, PathLike]) -> bool:
if isinstance(item, IndexObject):
for info in self._cache:
if item.binsha == info[0]:
return True
# END compare sha
# END for each entry
# END handle item is index object
# compatibility
# treat item as repo-relative path
else:
path = self.path
for info in self._cache:
if item == join_path(path, info[2]):
return True
# END for each item
return False
def __reversed__(self) -> Iterator[IndexObjUnion]:
return reversed(list(self._iter_convert_to_object(self._cache)))  # generators cannot be reversed
def _serialize(self, stream: 'BytesIO') -> 'Tree':
"""Serialize this tree into the stream. Please note that we will assume
our tree data to be in a sorted state. If this is not the case, serialization
will not generate a correct tree representation as readers assume the
entries to be sorted"""
tree_to_stream(self._cache, stream.write)
return self
def _deserialize(self, stream: 'BytesIO') -> 'Tree':
self._cache = tree_entries_from_data(stream.read())
return self
# END tree
# finalize map definition
Tree._map_id_to_type[Tree.tree_id] = Tree
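
# Two hedged usage sketches; `tree` is assumed to be a loaded, non-empty Tree
# (e.g. repo.head.commit.tree) and 'README' a hypothetical entry name.
def _demo_tree_lookup(tree: Tree) -> IndexObjUnion:
    blob = tree / 'README'                       # same as tree.join('README')
    first = tree[0]                              # list-style access by index
    assert first.path                            # entries carry repo-relative paths
    return blob

def _demo_tree_roundtrip(tree: Tree) -> bool:
    # serialize the cached entries and read them back into a fresh instance
    from io import BytesIO
    stream = BytesIO()
    tree._serialize(stream)
    stream.seek(0)
    clone = Tree(tree.repo, tree.binsha, path=tree.path)
    return clone._deserialize(stream)._cache == tree._cache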

View File

@@ -0,0 +1,573 @@
# util.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Module for general utility functions"""
from abc import ABC, abstractmethod
import warnings
from git.util import (
IterableList,
IterableObj,
Actor
)
import re
from collections import deque
from string import digits
import time
import calendar
from datetime import datetime, timedelta, tzinfo
# typing ------------------------------------------------------------
from typing import (Any, Callable, Deque, Iterator, Generic, NamedTuple, overload, Sequence, # NOQA: F401
TYPE_CHECKING, Tuple, Type, TypeVar, Union, cast)
from git.types import Has_id_attribute, Literal, _T # NOQA: F401
if TYPE_CHECKING:
from io import BytesIO, StringIO
from .commit import Commit
from .blob import Blob
from .tag import TagObject
from .tree import Tree, TraversedTreeTup
from subprocess import Popen
from .submodule.base import Submodule
from git.types import Protocol, runtime_checkable
else:
# Protocol = Generic[_T] # Needed for typing bug #572?
Protocol = ABC
def runtime_checkable(f):
return f
class TraverseNT(NamedTuple):
depth: int
item: Union['Traversable', 'Blob']
src: Union['Traversable', None]
T_TIobj = TypeVar('T_TIobj', bound='TraversableIterableObj') # for TraversableIterableObj.traverse()
TraversedTup = Union[Tuple[Union['Traversable', None], 'Traversable'], # for commit, submodule
'TraversedTreeTup'] # for tree.traverse()
# --------------------------------------------------------------------
__all__ = ('get_object_type_by_name', 'parse_date', 'parse_actor_and_date',
'ProcessStreamAdapter', 'Traversable', 'altz_to_utctz_str', 'utctz_to_altz',
'verify_utctz', 'Actor', 'tzoffset', 'utc')
ZERO = timedelta(0)
#{ Functions
def mode_str_to_int(modestr: Union[bytes, str]) -> int:
"""
:param modestr: string like 755 or 644 or 100644 - only the last 6 chars will be used
:return:
String identifying a mode compatible to the mode methods ids of the
stat module regarding the rwx permissions for user, group and other,
special flags and file system flags, i.e. whether it is a symlink
for example."""
mode = 0
for iteration, char in enumerate(reversed(modestr[-6:])):
char = cast(Union[str, int], char)
mode += int(char) << iteration * 3
# END for each char
return mode
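
# Worked example: "100644" is decoded one octal digit at a time,
# 4 + (4 << 3) + (6 << 6) + (1 << 15) == 0o100644 == 33188.
def _demo_mode_str_to_int() -> bool:
    return mode_str_to_int("100644") == 0o100644 and \
        mode_str_to_int(b"40000") == 0o40000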
def get_object_type_by_name(object_type_name: bytes
) -> Union[Type['Commit'], Type['TagObject'], Type['Tree'], Type['Blob']]:
"""
:return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown"""
if object_type_name == b"commit":
from . import commit
return commit.Commit
elif object_type_name == b"tag":
from . import tag
return tag.TagObject
elif object_type_name == b"blob":
from . import blob
return blob.Blob
elif object_type_name == b"tree":
from . import tree
return tree.Tree
else:
raise ValueError("Cannot handle unknown object type: %s" % object_type_name.decode())
def utctz_to_altz(utctz: str) -> int:
    """Convert a git timezone offset string into seconds west of UTC, the
    convention used by time.altzone. Git stores the offset with the opposite
    sign, which explains the negation below.
    :param utctz: git utc timezone string, i.e. +0200"""
    int_utctz = int(utctz)                  # e.g. '+0200' -> 200, '-0530' -> -530
    hours, minutes = divmod(abs(int_utctz), 100)
    seconds = hours * 3600 + minutes * 60   # the last two digits are minutes
    return -seconds if int_utctz >= 0 else seconds
def altz_to_utctz_str(altz: float) -> str:
    """As above, but inverses the operation, returning a string that can be used
    in commit objects"""
    secs_east = -int(altz)                  # positive values lie east of UTC
    prefix = '-' if secs_east < 0 else '+'
    hours, rem = divmod(abs(secs_east), 3600)
    return '%s%02d%02d' % (prefix, hours, rem // 60)
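
# Round-trip sketch for the two converters above: '+0530' exercises the
# half-hour branch (5h30m east of UTC, i.e. an altzone value of -19800).
def _demo_tz_roundtrip() -> bool:
    altz = utctz_to_altz('+0530')              # -> -19800 seconds west of UTC
    return altz == -19800 and altz_to_utctz_str(altz) == '+0530'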
def verify_utctz(offset: str) -> str:
""":raise ValueError: if offset is incorrect
:return: offset"""
fmt_exc = ValueError("Invalid timezone offset format: %s" % offset)
if len(offset) != 5:
raise fmt_exc
if offset[0] not in "+-":
raise fmt_exc
if offset[1] not in digits or\
offset[2] not in digits or\
offset[3] not in digits or\
offset[4] not in digits:
raise fmt_exc
# END for each char
return offset
class tzoffset(tzinfo):
def __init__(self, secs_west_of_utc: float, name: Union[None, str] = None) -> None:
self._offset = timedelta(seconds=-secs_west_of_utc)
self._name = name or 'fixed'
def __reduce__(self) -> Tuple[Type['tzoffset'], Tuple[float, str]]:
return tzoffset, (-self._offset.total_seconds(), self._name)
def utcoffset(self, dt: Union[datetime, None]) -> timedelta:
return self._offset
def tzname(self, dt: Union[datetime, None]) -> str:
return self._name
def dst(self, dt: Union[datetime, None]) -> timedelta:
return ZERO
utc = tzoffset(0, 'UTC')
def from_timestamp(timestamp: float, tz_offset: float) -> datetime:
"""Converts a timestamp + tz_offset into an aware datetime instance."""
utc_dt = datetime.fromtimestamp(timestamp, utc)
try:
local_dt = utc_dt.astimezone(tzoffset(tz_offset))
return local_dt
except ValueError:
return utc_dt
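
# Sketch: render a timestamp in the committer's own timezone. The values are
# fabricated; 25200 seconds west of UTC corresponds to a '-0700' offset.
def _demo_from_timestamp() -> str:
    return from_timestamp(1191999972, 25200).isoformat()   # ends with '-07:00'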
def parse_date(string_date: Union[str, datetime]) -> Tuple[int, int]:
"""
Parse the given date as one of the following
* aware datetime instance
* Git internal format: timestamp offset
* RFC 2822: Thu, 07 Apr 2005 22:13:13 +0200.
* ISO 8601 2005-04-07T22:13:13
The T can be a space as well
:return: Tuple(int(timestamp_UTC), int(offset)), the timestamp in seconds since epoch and the offset in seconds west of UTC
:raise ValueError: If the format could not be understood
:note: Date can also be YYYY.MM.DD, MM/DD/YYYY and DD.MM.YYYY.
"""
if isinstance(string_date, datetime):
if string_date.tzinfo:
utcoffset = cast(timedelta, string_date.utcoffset())  # typeguard, since tzinfo is not None
offset = -int(utcoffset.total_seconds())
return int(string_date.astimezone(utc).timestamp()), offset
else:
raise ValueError(f"string_date datetime object without tzinfo, {string_date}")
# git time
try:
if string_date.count(' ') == 1 and string_date.rfind(':') == -1:
timestamp, offset_str = string_date.split()
if timestamp.startswith('@'):
timestamp = timestamp[1:]
timestamp_int = int(timestamp)
return timestamp_int, utctz_to_altz(verify_utctz(offset_str))
else:
offset_str = "+0000" # local time by default
if string_date[-5] in '-+':
offset_str = verify_utctz(string_date[-5:])
string_date = string_date[:-6] # skip space as well
# END split timezone info
offset = utctz_to_altz(offset_str)
# now figure out the date and time portion - split time
date_formats = []
splitter = -1
if ',' in string_date:
date_formats.append("%a, %d %b %Y")
splitter = string_date.rfind(' ')
else:
# iso plus additional
date_formats.append("%Y-%m-%d")
date_formats.append("%Y.%m.%d")
date_formats.append("%m/%d/%Y")
date_formats.append("%d.%m.%Y")
splitter = string_date.rfind('T')
if splitter == -1:
splitter = string_date.rfind(' ')
# END handle 'T' and ' '
# END handle rfc or iso
assert splitter > -1
# split date and time
time_part = string_date[splitter + 1:] # skip space
date_part = string_date[:splitter]
# parse time
tstruct = time.strptime(time_part, "%H:%M:%S")
for fmt in date_formats:
try:
dtstruct = time.strptime(date_part, fmt)
utctime = calendar.timegm((dtstruct.tm_year, dtstruct.tm_mon, dtstruct.tm_mday,
tstruct.tm_hour, tstruct.tm_min, tstruct.tm_sec,
dtstruct.tm_wday, dtstruct.tm_yday, tstruct.tm_isdst))
return int(utctime), offset
except ValueError:
continue
# END exception handling
# END for each fmt
# still here ? fail
raise ValueError("no format matched")
# END handle format
except Exception as e:
raise ValueError(f"Unsupported date format or type: {string_date}, type={type(string_date)}") from e
# END handle exceptions
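
# Two hedged examples of accepted inputs; both calls return a
# (seconds_since_epoch, seconds_west_of_utc) tuple.
def _demo_parse_date() -> bool:
    git_internal = parse_date('1191999972 -0700')          # git's internal format
    rfc = parse_date('Thu, 07 Apr 2005 22:13:13 +0200')    # RFC 2822
    return git_internal == (1191999972, 25200) and rfc[1] == -7200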
# precompiled regex
_re_actor_epoch = re.compile(r'^.+? (.*) (\d+) ([+-]\d+).*$')
_re_only_actor = re.compile(r'^.+? (.*)$')
def parse_actor_and_date(line: str) -> Tuple[Actor, int, int]:
"""Parse out the actor (author or committer) info from a line like::
author Tom Preston-Werner <tom@mojombo.com> 1191999972 -0700
:return: [Actor, int_seconds_since_epoch, int_timezone_offset]"""
actor, epoch, offset = '', '0', '0'
m = _re_actor_epoch.search(line)
if m:
actor, epoch, offset = m.groups()
else:
m = _re_only_actor.search(line)
actor = m.group(1) if m else line or ''
return (Actor._from_string(actor), int(epoch), utctz_to_altz(offset))
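
# Example input taken from the docstring above: the actor, epoch and offset
# parse out as shown in the comments.
def _demo_parse_actor() -> bool:
    actor, epoch, offset = parse_actor_and_date(
        'author Tom Preston-Werner <tom@mojombo.com> 1191999972 -0700')
    return actor.email == 'tom@mojombo.com' and (epoch, offset) == (1191999972, 25200)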
#} END functions
#{ Classes
class ProcessStreamAdapter(object):
"""Class wireing all calls to the contained Process instance.
Use this type to hide the underlying process to provide access only to a specified
stream. The process is usually wrapped into an AutoInterrupt class to kill
it if the instance goes out of scope."""
__slots__ = ("_proc", "_stream")
def __init__(self, process: 'Popen', stream_name: str) -> None:
self._proc = process
self._stream: StringIO = getattr(process, stream_name) # guessed type
def __getattr__(self, attr: str) -> Any:
return getattr(self._stream, attr)
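
# Minimal sketch (POSIX `echo` assumed): expose only a process' stdout through
# the adapter; attribute access such as .read() is forwarded to that stream.
def _demo_stream_adapter() -> bytes:
    from subprocess import Popen, PIPE
    proc = Popen(['echo', 'hello'], stdout=PIPE)
    data = ProcessStreamAdapter(proc, 'stdout').read()     # proc.stdout.read()
    proc.wait()
    return data                                            # b'hello\n'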
@runtime_checkable
class Traversable(Protocol):
"""Simple interface to perform depth-first or breadth-first traversals
into one direction.
Subclasses only need to implement one function.
Instances of the subclass must be hashable.
Defined subclasses = [Commit, Tree, SubModule]
"""
__slots__ = ()
@classmethod
@abstractmethod
def _get_intermediate_items(cls, item: Any) -> Sequence['Traversable']:
"""
Returns:
Tuple of items connected to the given item.
Must be implemented in subclass
class Commit:: (cls, Commit) -> Tuple[Commit, ...]
class Submodule:: (cls, Submodule) -> IterableList[Submodule]
class Tree:: (cls, Tree) -> Tuple[Tree, ...]
"""
raise NotImplementedError("To be implemented in subclass")
@abstractmethod
def list_traverse(self, *args: Any, **kwargs: Any) -> Any:
""" """
warnings.warn("list_traverse() method should only be called from subclasses."
"Calling from Traversable abstract class will raise NotImplementedError in 3.1.20"
"Builtin sublclasses are 'Submodule', 'Tree' and 'Commit",
DeprecationWarning,
stacklevel=2)
return self._list_traverse(*args, **kwargs)
def _list_traverse(self, as_edge: bool = False, *args: Any, **kwargs: Any
) -> IterableList[Union['Commit', 'Submodule', 'Tree', 'Blob']]:
"""
:return: IterableList with the results of the traversal as produced by
traverse()
Commit -> IterableList['Commit']
Submodule -> IterableList['Submodule']
Tree -> IterableList[Union['Submodule', 'Tree', 'Blob']]
"""
# Commit and Submodule have _id_attribute_ from IterableObj
# Tree has _id_attribute_ inherited from IndexObject
if isinstance(self, Has_id_attribute):
id = self._id_attribute_
else:
id = "" # shouldn't reach here, unless Traversable subclass created with no _id_attribute_
# could add _id_attribute_ to Traversable, or make all Traversable also Iterable?
if not as_edge:
out: IterableList[Union['Commit', 'Submodule', 'Tree', 'Blob']] = IterableList(id)
out.extend(self.traverse(as_edge=as_edge, *args, **kwargs))
return out
# overloads in subclasses (mypy doesn't allow typing self: subclass)
# Union[IterableList['Commit'], IterableList['Submodule'], IterableList[Union['Submodule', 'Tree', 'Blob']]]
else:
# Raise DeprecationWarning, it doesn't make sense to use this
out_list: IterableList = IterableList(self.traverse(*args, **kwargs))
return out_list
@abstractmethod
def traverse(self, *args: Any, **kwargs: Any) -> Any:
""" """
warnings.warn("traverse() method should only be called from subclasses."
"Calling from Traversable abstract class will raise NotImplementedError in 3.1.20"
"Builtin sublclasses are 'Submodule', 'Tree' and 'Commit",
DeprecationWarning,
stacklevel=2)
return self._traverse(*args, **kwargs)
def _traverse(self,
predicate: Callable[[Union['Traversable', 'Blob', TraversedTup], int], bool] = lambda i, d: True,
prune: Callable[[Union['Traversable', 'Blob', TraversedTup], int], bool] = lambda i, d: False,
depth: int = -1, branch_first: bool = True, visit_once: bool = True,
ignore_self: int = 1, as_edge: bool = False
) -> Union[Iterator[Union['Traversable', 'Blob']],
Iterator[TraversedTup]]:
""":return: iterator yielding of items found when traversing self
:param predicate: f(i,d) returns False if item i at depth d should not be included in the result
:param prune:
f(i,d) return True if the search should stop at item i at depth d.
Item i will not be returned.
:param depth:
define at which level the iteration should not go deeper
if -1, there is no limit
if 0, you would effectively only get self, the root of the iteration
if 1, you would only get the first level of predecessors/successors
:param branch_first:
if True, items will be returned branch first, otherwise depth first
:param visit_once:
if True, items will only be returned once, although they might be encountered
several times. Loops are prevented that way.
:param ignore_self:
if True, self will be ignored and automatically pruned from
the result. Otherwise it will be the first item to be returned.
If as_edge is True, the source of the first edge is None
:param as_edge:
if True, return a pair of items, first being the source, second the
destination, i.e. tuple(src, dest) with the edge spanning from
source to destination"""
"""
Commit -> Iterator[Union[Commit, Tuple[Commit, Commit]]
Submodule -> Iterator[Submodule, Tuple[Submodule, Submodule]]
Tree -> Iterator[Union[Blob, Tree, Submodule,
Tuple[Union[Submodule, Tree], Union[Blob, Tree, Submodule]]]
ignore_self=True is_edge=True -> Iterator[item]
ignore_self=True is_edge=False --> Iterator[item]
ignore_self=False is_edge=True -> Iterator[item] | Iterator[Tuple[src, item]]
ignore_self=False is_edge=False -> Iterator[Tuple[src, item]]"""
visited = set()
stack: Deque[TraverseNT] = deque()
stack.append(TraverseNT(0, self, None)) # self is always depth level 0
def addToStack(stack: Deque[TraverseNT],
src_item: 'Traversable',
branch_first: bool,
depth: int) -> None:
lst = self._get_intermediate_items(src_item)
if not lst: # empty list
return None
if branch_first:
stack.extendleft(TraverseNT(depth, i, src_item) for i in lst)
else:
reviter = (TraverseNT(depth, lst[i], src_item) for i in range(len(lst) - 1, -1, -1))
stack.extend(reviter)
# END addToStack local method
while stack:
d, item, src = stack.pop() # depth of item, item, item_source
if visit_once and item in visited:
continue
if visit_once:
visited.add(item)
rval: Union[TraversedTup, 'Traversable', 'Blob']
if as_edge:  # if as_edge return (src, item) unless src is None (e.g. for first item)
rval = (src, item)
else:
rval = item
if prune(rval, d):
continue
skipStartItem = ignore_self and (item is self)
if not skipStartItem and predicate(rval, d):
yield rval
# only continue to next level if this is appropriate !
nd = d + 1
if depth > -1 and nd > depth:
continue
addToStack(stack, item, branch_first, nd)
# END for each item on work stack
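
# A toy Traversable to illustrate _traverse() without a repository; the node
# type and its fields are fabricated for this sketch.
class _DemoNode(Traversable):
    def __init__(self, name: str, children: Sequence['_DemoNode'] = ()) -> None:
        self.name = name
        self.children = tuple(children)

    @classmethod
    def _get_intermediate_items(cls, item: '_DemoNode') -> Sequence['_DemoNode']:
        return item.children

    def list_traverse(self, *args: Any, **kwargs: Any) -> Any:
        return self._list_traverse(*args, **kwargs)

    def traverse(self, *args: Any, **kwargs: Any) -> Any:
        return self._traverse(*args, **kwargs)

# e.g. [n.name for n in _DemoNode('root', [_DemoNode('a'), _DemoNode('b')]).traverse()]
# yields ['a', 'b'], as the root itself is skipped by the default ignore_self=1.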
@runtime_checkable
class Serializable(Protocol):
"""Defines methods to serialize and deserialize objects from and into a data stream"""
__slots__ = ()
# @abstractmethod
def _serialize(self, stream: 'BytesIO') -> 'Serializable':
"""Serialize the data of this object into the given data stream
:note: a serialized object would ``_deserialize`` into the same object
:param stream: a file-like object
:return: self"""
raise NotImplementedError("To be implemented in subclass")
# @abstractmethod
def _deserialize(self, stream: 'BytesIO') -> 'Serializable':
"""Deserialize all information regarding this object from the stream
:param stream: a file-like object
:return: self"""
raise NotImplementedError("To be implemented in subclass")
class TraversableIterableObj(IterableObj, Traversable):
__slots__ = ()
TIobj_tuple = Tuple[Union[T_TIobj, None], T_TIobj]
def list_traverse(self: T_TIobj, *args: Any, **kwargs: Any) -> IterableList[T_TIobj]:
return super(TraversableIterableObj, self)._list_traverse(*args, **kwargs)
@overload  # type: ignore
def traverse(self: T_TIobj
) -> Iterator[T_TIobj]:
...
@overload
def traverse(self: T_TIobj,
predicate: Callable[[Union[T_TIobj, Tuple[Union[T_TIobj, None], T_TIobj]], int], bool],
prune: Callable[[Union[T_TIobj, Tuple[Union[T_TIobj, None], T_TIobj]], int], bool],
depth: int, branch_first: bool, visit_once: bool,
ignore_self: Literal[True],
as_edge: Literal[False],
) -> Iterator[T_TIobj]:
...
@overload
def traverse(self: T_TIobj,
predicate: Callable[[Union[T_TIobj, Tuple[Union[T_TIobj, None], T_TIobj]], int], bool],
prune: Callable[[Union[T_TIobj, Tuple[Union[T_TIobj, None], T_TIobj]], int], bool],
depth: int, branch_first: bool, visit_once: bool,
ignore_self: Literal[False],
as_edge: Literal[True],
) -> Iterator[Tuple[Union[T_TIobj, None], T_TIobj]]:
...
@overload
def traverse(self: T_TIobj,
predicate: Callable[[Union[T_TIobj, TIobj_tuple], int], bool],
prune: Callable[[Union[T_TIobj, TIobj_tuple], int], bool],
depth: int, branch_first: bool, visit_once: bool,
ignore_self: Literal[True],
as_edge: Literal[True],
) -> Iterator[Tuple[T_TIobj, T_TIobj]]:
...
def traverse(self: T_TIobj,
predicate: Callable[[Union[T_TIobj, TIobj_tuple], int],
bool] = lambda i, d: True,
prune: Callable[[Union[T_TIobj, TIobj_tuple], int],
bool] = lambda i, d: False,
depth: int = -1, branch_first: bool = True, visit_once: bool = True,
ignore_self: int = 1, as_edge: bool = False
) -> Union[Iterator[T_TIobj],
Iterator[Tuple[T_TIobj, T_TIobj]],
Iterator[TIobj_tuple]]:
"""For documentation, see util.Traversable._traverse()"""
"""
# To typecheck instead of using cast.
import itertools
from git.types import TypeGuard
def is_commit_traversed(inp: Tuple) -> TypeGuard[Tuple[Iterator[Tuple['Commit', 'Commit']]]]:
for x in inp[1]:
if not isinstance(x, tuple) and len(x) != 2:
if all(isinstance(inner, Commit) for inner in x):
continue
return True
ret = super(Commit, self).traverse(predicate, prune, depth, branch_first, visit_once, ignore_self, as_edge)
ret_tup = itertools.tee(ret, 2)
assert is_commit_traversed(ret_tup), f"{[type(x) for x in list(ret_tup[0])]}"
return ret_tup[0]
"""
return cast(Union[Iterator[T_TIobj],
Iterator[Tuple[Union[None, T_TIobj], T_TIobj]]],
super(TraversableIterableObj, self)._traverse(
predicate, prune, depth, branch_first, visit_once, ignore_self, as_edge # type: ignore
))
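
# Closing sketch: the concrete subclasses return properly typed results. Here
# we walk the ancestry of a (hypothetical) repository's HEAD commit.
def _demo_commit_walk(repo: Any, limit: int = 5) -> list:
    import itertools
    head = repo.head.commit                      # Commit is a TraversableIterableObj
    return [c.hexsha for c in itertools.islice(head.traverse(), limit)]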