This file is indexed.

/usr/lib/python2.7/dist-packages/charmtools/repofinder.py is in charm-tools 2.1.2-0ubuntu4.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

import re
import subprocess

from collections import namedtuple

from . import utils


def get_recommended_repo(path):
    """Given vcs directory ``path``, returns the url from which the repo
    can be cloned.

    For git, an 'upstream' remote will be preferred over 'origin'.
    For bzr, the 'parent' branch will be preferred.
    For hg, the 'default' alias will be preferred.

    Returns None if the directory is not a repo, or a remote url cannot
    be determined.

    :param path: A :class:`path.path` to a directory
    :return: A url string, or None

    """

    Command = namedtuple("Command", "args parse")
    cmds = [
        Command(['git', 'remote', '-v'], _parse_git),
        Command(['bzr', 'info'], _parse_bzr),
        Command(['hg', 'paths'], _parse_hg),
    ]

    if not path.exists():
        return None

    with utils.cd(str(path)):
        for cmd in cmds:
            try:
                output = subprocess.check_output(cmd.args)
                if output:
                    repo = cmd.parse(output)
                    if repo:
                        return repo
            except (subprocess.CalledProcessError, OSError):
                continue


def _parse_git(txt):
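    """Parse ``git remote -v`` output and return the best fetch URL.

    The 'upstream' remote is preferred, then 'origin', then any other
    fetch remote. Returns None if no fetch remote is found.
    """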
    pat = re.compile(
        r'(?P<name>\S+)\s+(?P<url>\S+)\s+\((?P<type>[^\)]+)\)')
    urls = {}
    for line in txt.split('\n'):
        match = pat.search(line)
        if match:
            d = match.groupdict()
            if d['name'] == 'upstream' and d['type'] == 'fetch':
                return d['url'].strip()
            elif d['type'] == 'fetch':
                urls[d['name']] = d['url'].strip()

    if 'origin' in urls:
        return urls['origin']

    for url in urls.values():
        return url


def _parse_bzr(txt):
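    """Parse ``bzr info`` output and return the best branch URL.

    The 'parent' branch is preferred, then 'push', then 'submit'.
    Returns None if none of these branches are listed.
    """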
    branch_types = ['parent', 'push', 'submit']
    pat = re.compile(
        r'(?P<branch_type>({})) branch: (?P<url>.*)'.format(
            '|'.join(branch_types)))
    matches = {}
    for line in txt.split('\n'):
        match = pat.search(line)
        if match:
            d = match.groupdict()
            matches[d['branch_type']] = d['url'].strip()
    if not matches:
        return
    for typ in branch_types:
        url = matches.get(typ)
        if url:
            return url


def _parse_hg(txt):
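    """Parse ``hg paths`` output and return the best path URL.

    The 'default' path is preferred; otherwise the first listed path
    is returned. Returns None if no paths are listed.
    """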
    pat = re.compile(r'(?P<name>[^\s]+) = (?P<url>.*)')
    urls = []
    for line in txt.split('\n'):
        match = pat.search(line)
        if match:
            d = match.groupdict()
            if d['name'] == 'default':
                return d['url'].strip()
            else:
                urls.append(d['url'].strip())
    return urls[0] if urls else None
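
For illustration only (not part of the packaged file), a minimal usage sketch follows. It assumes the path.py library is importable and exposes the `path` class referenced in the docstring above, and it uses a hypothetical checkout directory; adjust both to the local environment.

# Hypothetical usage sketch; not part of repofinder.py.
from path import path  # assumes the path.py library exposes `path`

from charmtools.repofinder import get_recommended_repo

charm_dir = path('/tmp/my-charm')  # hypothetical local checkout
url = get_recommended_repo(charm_dir)
if url:
    print('Recommended clone URL: %s' % url)
else:
    print('No repository remote could be determined.')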