sourcedr: Remove incomplete sourcedr

This commit removes the incomplete source dependency review tool.

Test: n/a
Change-Id: I5f3b98aace3198262f35b45db552d64828533707
Logan Chien
2018-06-21 16:52:21 +08:00
parent 8f6fbbc467
commit 8ab92e5685
39 changed files with 6 additions and 3179 deletions

View File

@@ -1,64 +1,8 @@
# Source Deps Reviewer
# Source Dr.
## Synopsis
This is a collection of source tree analysis tools.
This is a tool for labeling dependencies with a web interface.
Basically, it greps the specified directory for the given pattern and lets
human reviewers label the dependencies, including code dependencies, which are
code segments that are highly related to the specific pattern.
## Installation and Dependencies
This tool depends on [codesearch](https://github.com/google/codesearch)
to generate the regular expression index. Please install it with:
```
$ go get github.com/google/codesearch/cmd/cindex
$ go get github.com/google/codesearch/cmd/csearch
```
This tool also depends on several Python packages. Install them with:
```
$ pip install -e .
```
To run the functional tests, run:
```
$ pip install -e .[dev]
```
Prism, a code syntax highlighter, is used.
It can be found at https://github.com/PrismJS/prism.
## Usage
Initialize a project:
```
sourcedr init --android-root [android-src] --project-dir [project-dir]
```
Scan the codebase:
```
sourcedr scan
```
If there are occurrences that have not been reviewed yet, review them with:
```
sourcedr review
```
Open a browser and visit [http://localhost:5000](http://localhost:5000).
## Testing
```
$ python3 sourcedr/functional_tests.py
```
- [blueprint](blueprint) analyzes Android.bp and the dependencies between the
modules.
- [ninja](ninja) analyzes `$(OUT)/combined-${target}.ninja`, which contains all
file dependencies.

View File

@@ -1,2 +0,0 @@
flask
Flask-testing

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env python3
"""Unit tests and functional tests runner."""
import argparse
import os
import unittest
TESTS_DIR = os.path.join(os.path.dirname(__file__), 'sourcedr', 'tests')
def main():
""" Find and run unit tests and functional tests."""
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
verbosity = 2 if args.verbose else 1
loader = unittest.TestLoader()
tests = loader.discover(TESTS_DIR, 'test_*.py')
runner = unittest.runner.TextTestRunner(verbosity=verbosity)
runner.run(tests)
if __name__ == '__main__':
main()

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env python3
from setuptools import setup
setup(
name='sourcedr',
version='1.0',
description='Shared Libs Deps Review Tool',
url='https://android.googlesource.com/platform/development/+'
'/master/vndk/tools/source-deps-reviewer/',
packages=[
'blueprint',
'ninja',
'sourcedr',
],
package_data={
'sourcedr': [
'defaults/pattern_db.csv',
'defaults/sourcedr.json',
'static/css/main.css',
'static/js/main.js',
'static/prism/css/prism.css',
'static/prism/js/prism.js',
],
},
install_requires=['flask'],
extras_require={
'dev': [
'flask_testing'
],
},
entry_points={
'console_scripts': [
'sourcedr = sourcedr.commands:main',
],
}
)

View File

@@ -1 +0,0 @@
#!/usr/bin/env python3

View File

@@ -1,324 +0,0 @@
#!/usr/bin/env python3
"""Code indexing and searching utilities.
This module builds an n-gram file index with codesearch_ and uses the index as
a bloom filter to find regular expression patterns.
In addition, language-specific filters are provided to ignore matchings in
string literals or comments in the source code.
.. _codesearch: https://github.com/google/codesearch
"""
import collections
import contextlib
import os
import re
import subprocess
class ClikeFilter(object):
def __init__(self, skip_literals=True, skip_comments=True):
self.file_exts = (b'.c', b'.cpp', b'.cc', b'.cxx', b'.h', b'.hpp',
b'.hxx', b'.java')
self.skip_literals = skip_literals
self.skip_comments = skip_comments
def process(self, code):
if self.skip_comments:
# Remove // comments.
code = re.sub(b'//[^\\r\\n]*[\\r\\n]', b'', code)
# Remove matched /* */ comments.
code = re.sub(b'/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/', b'', code)
if self.skip_literals:
# Remove matching quotes.
code = re.sub(b'"(?:\\\\?.)*?"', b'', code)
code = re.sub(b'\'(?:\\\\?.)*?\'', b'', code)
return code
def get_span(self, code):
span = []
if self.skip_comments:
# Remove // comments.
p = re.compile(b'//[^\\r\\n]*[\\r\\n]')
for m in p.finditer(code):
span.append(m.span())
# Remove matched /* */ comments.
p = re.compile(b'/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/')
for m in p.finditer(code):
span.append(m.span())
if self.skip_literals:
# Remove matching quotes.
p = re.compile(b'"(?:\\\\?.)*?"')
for m in p.finditer(code):
span.append(m.span())
p = re.compile(b'\'(?:\\\\?.)*?\'')
for m in p.finditer(code):
span.append(m.span())
return span
class PyFilter(object):
def __init__(self, skip_literals=True, skip_comments=True):
self.file_exts = (b'.py',)
self.skip_literals = skip_literals
self.skip_comments = skip_comments
def process(self, code):
if self.skip_comments:
# Remove # comments
code = re.sub(b'#[^\\r\\n]*[\\r\\n]', b'', code)
if self.skip_literals:
# Remove matching quotes.
code = re.sub(b'"(?:\\\\?.)*?"', b'', code)
code = re.sub(b'\'(?:\\\\?.)*?\'', b'', code)
return code
def get_span(self, code):
span = []
if self.skip_comments:
# Remove # comments.
p = re.compile(b'#[^\\r\\n]*[\\r\\n]')
for m in p.finditer(code):
span.append(m.span())
if self.skip_literals:
# Remove matching quotes.
p = re.compile(b'"(?:\\\\?.)*?"')
for m in p.finditer(code):
span.append(m.span())
p = re.compile(b'\'(?:\\\\?.)*?\'')
for m in p.finditer(code):
span.append(m.span())
return span
class AssemblyFilter(object):
def __init__(self, skip_literals=True, skip_comments=True):
self.file_exts = (b'.s', b'.S')
self.skip_literals = skip_literals
self.skip_comments = skip_comments
def process(self, code):
if self.skip_comments:
# Remove @ comments
code = re.sub(b'@[^\\r\\n]*[\\r\\n]', b'', code)
# Remove // comments.
code = re.sub(b'//[^\\r\\n]*[\\r\\n]', b'', code)
# Remove matched /* */ comments.
code = re.sub(b'/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/', b'', code)
return code
def get_span(self, code):
span = []
if self.skip_comments:
# Remove # comments.
p = re.compile(b'@[^\\r\\n]*[\\r\\n]')
for m in p.finditer(code):
span.append(m.span())
# Remove // comments
p = re.compile(b'//[^\\r\\n]*[\\r\\n]')
for m in p.finditer(code):
span.append(m.span())
# Remove matched /* */ comments
p = re.compile(b'/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/')
for m in p.finditer(code):
span.append(m.span())
return span
class MkFilter(object):
def __init__(self, skip_literals=True, skip_comments=True):
self.file_exts = (b'.mk',)
self.skip_literals = skip_literals
self.skip_comments = skip_comments
def process(self, code):
if self.skip_comments:
# Remove # comments
code = re.sub(b'#[^\\r\\n]*[\\r\\n]', b'', code)
return code
def get_span(self, code):
span = []
if self.skip_comments:
# Remove # comments.
p = re.compile(b'#[^\\r\\n]*[\\r\\n]')
for m in p.finditer(code):
span.append(m.span())
return span
class BpFilter(object):
def __init__(self, skip_literals=True, skip_comments=True):
self.file_exts = (b'.bp',)
self.skip_literals = skip_literals
self.skip_comments = skip_comments
def process(self, code):
if self.skip_comments:
# Remove // comments
code = re.sub(b'//[^\\r\\n]*[\\r\\n]', b'', code)
return code
def get_span(self, code):
span = []
if self.skip_comments:
# Remove // comments.
p = re.compile(b'//[^\\r\\n]*[\\r\\n]')
for m in p.finditer(code):
span.append(m.span())
return span
class PathFilter(object):
def __init__(self, file_ext_black_list=tuple(),
file_name_black_list=tuple(),
path_component_black_list=tuple()):
self.file_ext_black_list = set(
x.encode('utf-8') for x in file_ext_black_list)
self.file_name_black_list = set(
x.encode('utf-8') for x in file_name_black_list)
self.path_component_black_list = set(
x.encode('utf-8') for x in path_component_black_list)
def should_skip(self, path):
file_name = os.path.basename(path)
file_ext = os.path.splitext(file_name)[1]
if file_ext.lower() in self.file_ext_black_list:
return True
if file_name in self.file_name_black_list:
return True
return any(patt in path for patt in self.path_component_black_list)
class CodeSearch(object):
DEFAULT_NAME = 'csearchindex'
@classmethod
def get_default_path(cls, project_dir):
return os.path.join(project_dir, 'tmp', cls.DEFAULT_NAME)
def __init__(self, root_dir, index_file_path, path_filter=None):
self.path = os.path.abspath(index_file_path)
self._root_dir = os.path.abspath(root_dir)
self._env = dict(os.environ)
self._env['CSEARCHINDEX'] = self.path
self._filters = {}
self._path_filter = PathFilter() if path_filter is None else path_filter
def _run_cindex(self, options):
subprocess.check_call(['cindex'] + options, env=self._env,
cwd=self._root_dir, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def _run_csearch(self, options):
if not os.path.exists(self.path):
raise ValueError('Failed to find ' + self.path)
return subprocess.check_output(['csearch'] + options, env=self._env,
cwd=self._root_dir,
stderr=subprocess.DEVNULL)
def add_filter(self, lang_filter):
for ext in lang_filter.file_exts:
self._filters[ext] = lang_filter
def add_default_filters(self, skip_literals=True, skip_comments=True):
self.add_filter(ClikeFilter(skip_literals, skip_comments))
self.add_filter(AssemblyFilter(skip_literals, skip_comments))
self.add_filter(PyFilter(skip_literals, skip_comments))
self.add_filter(MkFilter(skip_literals, skip_comments))
self.add_filter(BpFilter(skip_literals, skip_comments))
def build_index(self, remove_existing_index=True):
if remove_existing_index and os.path.exists(self.path):
with contextlib.suppress(FileNotFoundError):
os.remove(self.path)
os.makedirs(os.path.dirname(self.path), exist_ok=True)
self._run_cindex([self._root_dir])
def _sanitize_code(self, file_path):
with open(file_path, 'rb') as f:
code = f.read()
file_name = os.path.basename(file_path)
f, ext = os.path.splitext(file_name)
try:
code = self._filters[ext].process(code)
except KeyError:
pass
return code
def _remove_prefix(self, raw_grep):
ret = b''
patt = re.compile(b'([^:]+):(\\d+):(.*)$')
for line in raw_grep.split(b'\n'):
match = patt.match(line)
if not match:
continue
file_path = os.path.relpath(match.group(1),
self._root_dir.encode('utf-8'))
line_no = match.group(2)
code = match.group(3)
ret += file_path + b':' + line_no + b':' + code + b'\n'
return ret
def process_grep(self, raw_grep, pattern, is_regex):
pattern = pattern.encode('utf-8')
if not is_regex:
pattern = re.escape(pattern)
# Limit the pattern so that a match does not extend beyond a single line,
# since grep may report multiple matches in a single entry.
pattern = re.compile(pattern + b'[^\\n\\r]*(?:\\n|\\r|$)')
patt = re.compile(b'([^:]+):(\\d+):(.*)$')
suspect = collections.defaultdict(list)
for line in raw_grep.split(b'\n'):
match = patt.match(line)
if not match:
continue
file_path = match.group(1)
line_no = match.group(2)
code = match.group(3)
if self._path_filter.should_skip(file_path):
continue
abs_file_path = os.path.join(self._root_dir.encode('utf-8'),
file_path)
# Check if any pattern can be found after sanitize_code
if not pattern.search(self._sanitize_code(abs_file_path)):
continue
suspect[abs_file_path].append((file_path, line_no, code))
suspect = sorted(suspect.items())
processed = b''
for file_path, entries in suspect:
with open(file_path, 'rb') as f:
code = f.read()
# Deep filter: drop matches that fall inside comments or string literals.
file_name = os.path.basename(file_path)
f, ext = os.path.splitext(file_name)
try:
span = self._filters[ext].get_span(code)
except KeyError:
span = []
matchers = [m for m in pattern.finditer(code)]
for i, matcher in enumerate(matchers):
if not span or all(span_ent[0] > matcher.start() or
span_ent[1] <= matcher.start()
for span_ent in span):
processed += (entries[i][0] + b':' +
entries[i][1] + b':' +
entries[i][2] + b'\n')
return processed
def raw_grep(self, pattern):
try:
return self._remove_prefix(self._run_csearch(['-n', pattern]))
except subprocess.CalledProcessError as e:
if e.output == b'':
return b''
raise
def raw_search(self, pattern, is_regex):
if not is_regex:
pattern = re.escape(pattern)
return self.raw_grep(pattern)
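
As a hedged illustration of the language filters above (the C snippet and pattern below are invented, and `ClikeFilter` is assumed to be in scope): `process()` strips comments and string literals so that occurrences inside them are not reported.
```
import re

# Invented snippet: "dlopen" appears in a comment, a call, and a string literal.
code = (b'// dlopen mentioned in a comment\n'
        b'void *handle = dlopen("libfoo.so", RTLD_NOW);\n'
        b'const char *name = "dlopen";\n')

flt = ClikeFilter(skip_literals=True, skip_comments=True)
sanitized = flt.process(code)

pattern = re.compile(b'\\bdlopen\\b')
print(len(pattern.findall(code)))       # 3 matches in the raw code
print(len(pattern.findall(sanitized)))  # 1 match left: the actual call
```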

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env python3
"""Parser for command line options."""
import argparse
import sys
from sourcedr.commands import collect, init, scan, review
def main():
"""Register sub-commands, parse command line options, and delegate."""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcmd')
commands = {}
def _register_subcmd(name, init_argparse):
commands[name] = init_argparse(subparsers)
_register_subcmd('init', init.init_argparse)
_register_subcmd('scan', scan.init_argparse)
_register_subcmd('review', review.init_argparse)
_register_subcmd('collect', collect.init_argparse)
args = parser.parse_args()
try:
func = commands[args.subcmd]
except KeyError:
parser.print_help()
sys.exit(1)
sys.exit(func(args))
if __name__ == '__main__':
main()

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env python3
"""`sourcedr collect` command."""
import json
import os
import sys
from sourcedr.project import Project
from sourcedr.map import (
link_build_dep_and_review_data, load_build_dep_ninja, load_review_data)
def init_argparse(parsers):
"""Initialize argument parser for `sourcedr collect`."""
parser = parsers.add_parser('collect', help='Open web-based review UI')
parser.add_argument('input', help='Ninja file')
parser.add_argument('--ninja-deps')
parser.add_argument('--project-dir', default='.')
parser.add_argument('-o', '--output', required=True)
return run
def run(args):
project_dir = os.path.expanduser(args.project_dir)
project = Project(project_dir)
# Load build dependency file
try:
dep = load_build_dep_ninja(args.input, project.source_dir,
args.ninja_deps)
except IOError:
print('error: Failed to open build dependency file:', args.input,
file=sys.stderr)
sys.exit(1)
# Load review data
table = load_review_data(project.review_db.path)
# Link build dependency file and review data
res = link_build_dep_and_review_data(dep, table)
# Write the output file
with open(args.output, 'w') as f:
json.dump(res, f, sort_keys=True, indent=4)

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env python3
"""`sourcedr init` command."""
import os
import sys
from sourcedr.project import Project
def _is_dir_empty(path):
"""Determine whether the given path is an empty directory."""
return len(os.listdir(path)) == 0
def init_argparse(parsers):
"""Initialize argument parser for `sourcedr init`."""
parser = parsers.add_parser('init', help='Start a new review project')
parser.add_argument('--project-dir', default='.')
parser.add_argument('--android-root', required=True,
help='Android source tree root directory')
return run
def run(args):
"""Main function for `sourcedr init`."""
if args.project_dir == '.' and not _is_dir_empty(args.project_dir):
print('error: Current working directory is not an empty directory.',
file=sys.stderr)
sys.exit(1)
project_dir = os.path.expanduser(args.project_dir)
source_dir = os.path.expanduser(args.android_root)
Project.get_or_create_project_dir(project_dir, source_dir)

View File

@@ -1,29 +0,0 @@
#!/usr/bin/env python3
"""`sourcedr review` command."""
import os
from sourcedr.project import Project
from sourcedr.server import create_app
def init_argparse(parsers):
"""Initialize argument parser for `sourcedr init`."""
parser = parsers.add_parser('review', help='Open web-based review UI')
parser.add_argument('--project-dir', default='.')
parser.add_argument('--rebuild-csearch-index', action='store_true',
help='Re-build the existing csearch index file')
return run
def run(args):
"""Main function for `sourcedr init`."""
project_dir = os.path.expanduser(args.project_dir)
project = Project(project_dir)
project.update_csearch_index(args.rebuild_csearch_index)
project.update_review_db()
app = create_app(project)
app.run()

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env python3
"""`sourcedr scan` command."""
def init_argparse(parsers):
"""Initialize argument parser for `sourcedr scan`."""
parsers.add_parser('scan', help='Scan all pattern occurrences')
return run
def run(_):
"""Main function for `sourcedr scan`."""
print('error: Need human review. Run: `sourcedr review`')

View File

@@ -1 +0,0 @@
1,\bdlopen\b

View File

@@ -1,37 +0,0 @@
{
"source_dir": "",
"file_ext_blacklist": [
".1",
".ac",
".cmake",
".html",
".info",
".la",
".m4",
".map",
".md",
".py",
".rst",
".sh",
".sym",
".txt",
".xml"
],
"file_name_blacklist": [
"CHANGES.0",
"ChangeLog",
"config.h.in",
"configure",
"configure.in",
"configure.linaro",
"libtool"
],
"path_component_blacklist": [
".git",
".repo",
"autom4te.cache",
"binutils",
"dejagnu",
"llvm/Config/Config"
]
}

View File

@@ -1,90 +0,0 @@
#!/usr/bin/env python3
"""This command maps source file review results to compiled binaries.
"""
import argparse
import collections
import itertools
import json
import os
import sys
from ninja import ninja
from sourcedr.review_db import ReviewDB
def load_build_dep_graph(graph):
# Collect all shared libraries
shared_libs = set()
for key, value in graph.items():
if key.split('.')[-1] == 'so':
shared_libs.add(key)
for v in value:
if v.split('.')[-1] == 'so':
shared_libs.add(v)
# Collect transitive closures
dep = {}
for s in shared_libs:
visited = set()
stack = [s]
while stack:
v = stack.pop()
if v not in visited:
visited.add(v)
try:
stack.extend(x for x in graph[v]
if x not in visited and not x.endswith('.so')
and not x.endswith('.toc'))
except KeyError:
pass
visited.remove(s)
dep[s] = visited
return dep
def load_build_dep_ninja(ninja_path, work_dir, ninja_deps=None):
manifest = ninja.Parser().parse(ninja_path, 'utf-8', ninja_deps)
graph = collections.defaultdict(set)
for build in manifest.builds:
for path in itertools.chain(build.explicit_outs, build.implicit_outs):
ins = graph[path]
ins.update(build.explicit_ins)
ins.update(build.implicit_ins)
ins.update(build.depfile_implicit_ins)
return load_build_dep_graph(graph)
def load_build_dep_file(fp):
return load_build_dep_graph(json.load(fp))
def load_build_dep_file_from_path(path):
with open(path, 'r') as fp:
return load_build_dep_file(fp)
def load_review_data(path):
table = collections.defaultdict(list)
review_db = ReviewDB(path, None)
for key, item in review_db.data.items():
table[key.split(':')[0]] += item[0]
return table
def link_build_dep_and_review_data(dep, table):
res = collections.defaultdict(list)
for out, ins in dep.items():
try:
res[out] += table[out]
except KeyError:
pass
for in_file in ins:
try:
res[out] += table[in_file]
except KeyError:
pass
return res
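
As a hedged sketch of what `load_build_dep_graph` above computes (the graph is made up): each shared library is mapped to the transitive closure of its non-`.so`, non-`.toc` inputs, and the traversal does not descend into other shared libraries.
```
graph = {
    'libfoo.so': ['foo.o', 'libbar.so'],
    'foo.o': ['foo.c'],
    'libbar.so': ['bar.o'],
    'bar.o': ['bar.c'],
}

dep = load_build_dep_graph(graph)
# dep == {'libfoo.so': {'foo.o', 'foo.c'},
#         'libbar.so': {'bar.o', 'bar.c'}}
```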

View File

@@ -1,44 +0,0 @@
#!/usr/bin/env python3
import os
class PatternDB(object):
"""Pattern database for patterns to be searched in the source tree.
"""
DEFAULT_NAME = 'pattern_db.csv'
@classmethod
def get_default_path(cls, project_dir):
return os.path.join(project_dir, cls.DEFAULT_NAME)
def __init__(self, path):
self.path = path
self.data = self._load()
def _load(self):
with open(self.path, 'r') as f:
patterns = []
is_regexs = []
for line in f:
line = line.rstrip('\n')
sp = line.split(',')
is_regexs.append(sp[0])
patterns.append(','.join(sp[1:]))
return (patterns, is_regexs)
def load(self):
self.data = self._load()
return self.data
def save_new_pattern(self, patt, is_regex):
"""Add a pattern to the database."""
with open(self.path, 'a') as f:
f.write(str(int(is_regex)) + ',' + patt + '\n')
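
A minimal usage sketch for `PatternDB` above (the file name and the added pattern are illustrative, and the CSV file is assumed to already exist): each line of the database is `<is_regex>,<pattern>`, matching the default `1,\bdlopen\b` entry shown earlier.
```
db = PatternDB('pattern_db.csv')
db.save_new_pattern(r'\bdlsym\b', is_regex=True)
patterns, is_regexs = db.load()
print(patterns)    # ['\\bdlopen\\b', '\\bdlsym\\b']
print(is_regexs)   # ['1', '1']
```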

View File

@@ -1,173 +0,0 @@
#!/usr/bin/env python3
"""SourceDR project configurations and databases.
`Project` class holds configuration files, review databases, pattern databases,
and `codesearch` index files.
"""
import collections
import json
import os
import shutil
from sourcedr.codesearch import CodeSearch, PathFilter
from sourcedr.pattern_db import PatternDB
from sourcedr.review_db import ReviewDB
from sourcedr.utils import LockedFile
class Config(object):
"""SourceDR project configuration file."""
DEFAULT_NAME = 'sourcedr.json'
_PATH_TRAVERSAL_ATTRS = (
'file_ext_blacklist', 'file_name_blacklist',
'path_component_blacklist')
@classmethod
def get_default_path(cls, project_dir):
"""Get the default path of the configuration file under a project
directory."""
return os.path.join(project_dir, cls.DEFAULT_NAME)
def __init__(self, path):
self.path = path
self.source_dir = None
self.file_ext_blacklist = set()
self.file_name_blacklist = set()
self.path_component_blacklist = set()
def load(self):
"""Load the project configuration from the JSON file."""
with open(self.path, 'r') as config_fp:
config_json = json.load(config_fp)
for key, value in config_json.items():
if key == 'source_dir':
self.source_dir = value
elif key in self._PATH_TRAVERSAL_ATTRS:
setattr(self, key, set(value))
else:
raise ValueError('unknown config name: ' + key)
def save(self):
"""Save the project configuration to the JSON file."""
with LockedFile(self.path, 'x') as config_fp:
config = collections.OrderedDict()
config['source_dir'] = self.source_dir
for key in self._PATH_TRAVERSAL_ATTRS:
config[key] = sorted(getattr(self, key))
json.dump(config, config_fp, indent=2)
class Project(object):
"""SourceDR project configuration files and databases."""
def __init__(self, project_dir):
"""Load a project from a given project directory."""
project_dir = os.path.abspath(project_dir)
self.project_dir = project_dir
if not os.path.isdir(project_dir):
raise ValueError('project directory not found: ' + project_dir)
# Load configuration files
config_path = Config.get_default_path(project_dir)
self.config = Config(config_path)
self.config.load()
# Recalculate source directory
self.source_dir = os.path.abspath(
os.path.join(project_dir, self.config.source_dir))
# csearchindex file
path_filter = PathFilter(self.config.file_ext_blacklist,
self.config.file_name_blacklist,
self.config.path_component_blacklist)
csearch_index_path = CodeSearch.get_default_path(project_dir)
self.codesearch = CodeSearch(self.source_dir, csearch_index_path,
path_filter)
self.codesearch.add_default_filters()
# Review database file
review_db_path = ReviewDB.get_default_path(project_dir)
self.review_db = ReviewDB(review_db_path, self.codesearch)
# Pattern database file
pattern_db_path = PatternDB.get_default_path(project_dir)
self.pattern_db = PatternDB(pattern_db_path)
# Sanity checks
self._check_source_dir()
self._check_lock_files()
def update_csearch_index(self, remove_existing_index):
"""Create or update codesearch index."""
self.codesearch.build_index(remove_existing_index)
def update_review_db(self):
"""Update the entries in the review database."""
patterns, is_regexs = self.pattern_db.load()
self.review_db.find(patterns, is_regexs)
def _check_source_dir(self):
"""Check the availability of the source directory."""
if not os.path.isdir(self.source_dir):
raise ValueError('source directory not found: ' + self.source_dir)
def _check_lock_files(self):
"""Check whether there are some lock files."""
for path in (self.config.path, self.review_db.path,
self.pattern_db.path):
if LockedFile.is_locked(path):
raise ValueError('file locked: ' + path)
@classmethod
def create_project_dir(cls, project_dir, source_dir):
"""Create a directory for a new project and setup default
configurations."""
if not os.path.isdir(source_dir):
raise ValueError('source directory not found: ' + source_dir)
os.makedirs(project_dir, exist_ok=True)
# Compute the relative path between project_dir and source_dir
project_dir = os.path.abspath(project_dir)
source_dir = os.path.relpath(os.path.abspath(source_dir), project_dir)
# Copy default files
defaults_dir = os.path.join(os.path.dirname(__file__), 'defaults')
for name in (Config.DEFAULT_NAME, PatternDB.DEFAULT_NAME):
shutil.copyfile(os.path.join(defaults_dir, name),
os.path.join(project_dir, name))
# Update the source directory in the configuration file
config_path = Config.get_default_path(project_dir)
config = Config(config_path)
config.load()
config.source_dir = source_dir
config.save()
return Project(project_dir)
@classmethod
def get_or_create_project_dir(cls, project_dir, source_dir):
config_file_path = Config.get_default_path(project_dir)
if os.path.exists(config_file_path):
return Project(project_dir)
else:
return cls.create_project_dir(project_dir, source_dir)
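
A hedged sketch of the programmatic flow that the `sourcedr init` and `sourcedr review` commands drive through this class (the directory names are placeholders):
```
project = Project.get_or_create_project_dir('my-review', '/path/to/android-src')
project.update_csearch_index(remove_existing_index=False)  # build csearchindex
project.update_review_db()                                 # grep for all patterns
```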

View File

@@ -1,94 +0,0 @@
#!/usr/bin/env python3
import json
import os
import re
class ReviewDB(object):
DEFAULT_NAME = 'review_db.json'
@classmethod
def get_default_path(cls, project_dir):
return os.path.join(project_dir, cls.DEFAULT_NAME)
def __init__(self, path, codesearch):
self.path = path
self._cs = codesearch
try:
self.data = self._load_data()
except FileNotFoundError:
self.data = {}
# patterns and is_regexs are lists
def find(self, patterns, is_regexs):
# they shouldn't be empty
assert patterns and is_regexs
processed = b''
for pattern, is_regex in zip(patterns, is_regexs):
if not is_regex:
pattern = re.escape(pattern)
raw_grep = self._cs.raw_grep(pattern)
if raw_grep == b'':
continue
processed += self._cs.process_grep(raw_grep, pattern, is_regex)
self.to_json(processed)
def add_pattern(self, pattern, is_regex):
if not is_regex:
pattern = re.escape(pattern)
raw_grep = self._cs.raw_grep(pattern)
if raw_grep == b'':
return
processed = self._cs.process_grep(raw_grep, pattern, is_regex)
self.add_to_json(processed)
def to_json(self, processed):
data = {}
patt = re.compile('([^:]+):(\\d+):(.*)$')
for line in processed.decode('utf-8').split('\n'):
match = patt.match(line)
if not match:
continue
data[line] = ([], [])
# if old data exists, perform merge
if os.path.exists(self.path):
data.update(self._load_data())
self._save_data(data)
self.data = self._load_data()
def add_to_json(self, processed):
# Load all matched grep.
data = self._load_data()
patt = re.compile('([^:]+):(\\d+):(.*)$')
for line in processed.decode('utf-8').split('\n'):
match = patt.match(line)
if not match:
continue
data[line] = ([], [])
self._save_data(data)
self.data = self._load_data()
def add_label(self, label, deps, codes):
self.data[label] = (deps, codes)
self._save_data(self.data)
def _save_data(self, data):
with open(self.path, 'w') as data_fp:
json.dump(data, data_fp, sort_keys=True, indent=4)
def _load_data(self):
with open(self.path, 'r') as data_fp:
return json.load(data_fp)
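
For reference, a hedged sketch of the review database layout implied by `ReviewDB` above: each key is a `path:line_no:matched_code` string emitted by the grep step, and each value is a `(deps, codes)` pair filled in during review. The concrete path and labels below are invented.
```
example_data = {
    'foo/bar.c:42:    void *h = dlopen(name, RTLD_NOW);': (
        ['libfoo.so'],                                        # labeled dependencies
        ['foo/bar.c:    void *h = dlopen(name, RTLD_NOW);'],  # related code segments
    ),
}
```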

View File

@@ -1,165 +0,0 @@
#!/usr/bin/env python3
import collections
import functools
import json
import os
import re
from flask import (
Blueprint, Flask, current_app, jsonify, render_template, request)
codereview = Blueprint('codereview', __name__, template_folder='templates')
# Check whether the code segment still appears verbatim in the file.
def same(fl, code, source_dir):
fl = os.path.join(source_dir, fl)
with open(fl, 'r') as f:
fc = f.read()
return code in fc
# Check if the file needs to be reviewed again.
def check(codes, source_dir):
ret = []
for item in codes:
fl = item.split(':')[0]
code = item[len(fl) + 1:]
ret.append(same(fl, code, source_dir))
return ret
@codereview.route('/get_started')
def _get_started():
project = current_app.config.project
source_dir = project.source_dir
review_db = project.review_db
lst, done = [], []
for key, item in sorted(review_db.data.items()):
lst.append(key)
if item[0]:
done.append(all(check(item[1], source_dir)))
else:
done.append(False)
pattern_lst = project.pattern_db.load()[0]
abs_path = os.path.abspath(source_dir)
return jsonify(lst=json.dumps(lst),
done=json.dumps(done),
pattern_lst=json.dumps(pattern_lst),
path_prefix=os.path.join(abs_path, ''))
@codereview.route('/load_file')
def _load_file():
project = current_app.config.project
source_dir = project.source_dir
review_db = project.review_db
path = request.args.get('path')
if path not in review_db.data.keys():
print('No such entry', path)
return jsonify(result='')
deps, codes = review_db.data[path]
return jsonify(deps=json.dumps(deps), codes=json.dumps(codes),
okays=json.dumps(check(codes, source_dir)))
@codereview.route('/get_file')
def _get_file():
path = request.args.get('path')
path = os.path.join(current_app.config.project.source_dir, path)
if not os.path.exists(path):
return jsonify(result='No such file')
with open(path, 'r') as f:
code = f.read()
return jsonify(result=code)
@codereview.route('/save_all')
def _save_all():
label = request.args.get('label')
deps = json.loads(request.args.get('deps'))
codes = json.loads(request.args.get('codes'))
project = current_app.config.project
review_db = project.review_db
review_db.add_label(label, deps, codes)
return jsonify(result='done')
# This function adds a new pattern to grep for.
@codereview.route('/add_pattern')
def _add_pattern():
patt = request.args.get('pattern')
is_regex = request.args.get('is_regex')
engine = current_app.config.project.review_db
engine.add_pattern(patt, is_regex)
project = current_app.config.project
project.pattern_db.save_new_pattern(patt, is_regex)
return jsonify(result='done')
# This function performs a temporary grep on the directory
# without adding the result to the database.
@codereview.route('/temporary_search')
def _temporary_search():
path = request.args.get('path')
patt = request.args.get('pattern')
is_regex = request.args.get('is_regex')
codesearch = current_app.config.project.codesearch
result = codesearch.raw_search(patt, is_regex).decode('utf-8')
dic = collections.defaultdict(list)
patt = re.compile('([^:]+):(\\d+):(.*)$')
for line in result.split('\n'):
match = patt.match(line)
if not match:
continue
file_path = match.group(1)
line_no = match.group(2)
code = match.group(3)
dic[file_path].append((line_no, code))
def compare(item1, item2):
key1, value1 = item1
key2, value2 = item2
cnt1 = os.path.commonprefix([path, key1]).count('/')
cnt2 = os.path.commonprefix([path, key2]).count('/')
e1 = os.path.relpath(key1, path).count('/')
e2 = os.path.relpath(key2, path).count('/')
# prefer smaller edit distance
if e1 < e2: return -1
if e2 < e1: return 1
# prefer deeper common ancestor
if cnt1 > cnt2: return -1
if cnt2 > cnt1: return 1
# lexicographical order
if key1 < key2: return -1
if key2 < key1: return 1
return 0
result = sorted(dic.items(), key=functools.cmp_to_key(compare))
return jsonify(result=json.dumps(result))
@codereview.route('/')
def render():
return render_template('index.html')
def create_app(project):
app = Flask(__name__)
app.register_blueprint(codereview)
app.config.project = project
return app
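
Since `create_app` returns a plain Flask application, the review UI can also be exercised without a browser, for example with Flask's test client. A minimal sketch, assuming a `Project` instance as constructed by the commands above:
```
app = create_app(project)
with app.test_client() as client:
    resp = client.get('/get_started')
    print(resp.get_json()['path_prefix'])  # absolute source directory prefix
```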

View File

@@ -1,26 +0,0 @@
h3, h4 {
display: inline-block;
}
pre {
white-space: pre-wrap; /* Since CSS 2.1 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
background-color: #ffffff;
margin: 0;
}
@media (min-width: 768px) {
.modal-xl {
width: 90%;
max-width:1200px;
}
}
.affix {
top:50px;
right:0;
position:fixed;
}

View File

@@ -1,333 +0,0 @@
(function () {
'use strict';
var ccounter = 0;
var counter = 0;
var current_item = null;
// make item list sortable
$( function() {
$("#item_list").sortable();
$("#item_list").disableSelection();
});
function moveToTop(index) {
if (index == 0) {
return;
}
let target = $('#item_list').children().eq(index);
let tp = $('#item_list').children().eq(0);
let old_offset = target.position();
tp.before(target);
let new_offset = target.position();
let tmp = target.clone().appendTo('#item_list')
.css('position', 'absolute')
.css('left', old_offset.left)
.css('top', old_offset.top);
target.hide();
let new_pos = {'top': new_offset.top, 'left': new_offset.left};
tmp.animate(new_pos, 'slow', function() {
target.show();
tmp.remove();
});
}
function getSelText() {
let txt = window.getSelection();
$('#selected_text').val(txt);
$('#code_file_path').val($('#browsing_file_path').text());
return txt;
}
function taskHtml(text, cnt) {
return '<li><span class="display" id="dep' + cnt + '">' + text +
'</span>' + '<input type="text" class="edit" style="display:none"/>' +
'<input type="submit" class="delete" value="X">' +'</li>';
}
function codeHtml(text, cnt, okay) {
return (okay? '<li>' : '<li style="color:red;">') +
'<span id="code' + cnt + '">' + text +
'</span><input type="submit" class="delete" value="X">' + '</li>';
}
function itemHtml(done, text) {
let atag = document.createElement('a');
atag.innerText = text;
if (done) {
atag.className = 'list-group-item list-group-item-success';
} else {
atag.className = 'list-group-item list-group-item-danger';
}
let pretag = document.createElement('pre');
pretag.appendChild(atag);
pretag.onclick = setItem;
return pretag;
}
function grepResultHtml(items) {
let ret = document.createElement('p');
for (let i = 0; i < items.length; i++) {
let path = document.createElement('span');
path.style.color = 'purple';
path.style.fontSize = '20px';
path.innerHTML = items[i][0];
ret.appendChild(path);
ret.appendChild(document.createElement('br'));
for (let j = 0; j < items[i][1].length; j++) {
let line_no = items[i][1][j][0];
let content = items[i][1][j][1];
let line_html = document.createElement('font');
line_html.style.color = 'green';
line_html.style.fontSize = '18px';
line_html.innerHTML = line_no + ':';
ret.appendChild(line_html);
let content_html = document.createElement('span');
content_html.style.fontSize = '18px';
content_html.appendChild(document.createTextNode(content));
ret.appendChild(content_html);
ret.appendChild(document.createElement('br'));
}
}
return ret;
}
function enterTask() {
let text = $('#enter_deps').val();
$('#deps_list').append(taskHtml(text, counter));
$('.delete').click(function () {
$(this).parent().remove();
});
counter++;
return false;
}
function setTask(deps) {
$('#deps_list').empty();
counter = 0;
let len = deps.length;
for (let i = 0; i < len; i++) {
let text = deps[i];
$('#deps_list').append(taskHtml(text, counter));
$('.delete').click(function () {
$(this).parent().remove();
});
counter++;
}
}
function enterCode() {
let text = $('#code_file_path').val() + ':' + $('#selected_text').val();
$('#code_list').append(codeHtml(text, ccounter, true));
$('.delete').click(function () {
$(this).parent().remove();
});
ccounter++;
return false;
}
function setCode(codes, okays) {
$('#code_list').empty();
ccounter = 0;
let len = codes.length;
for (let i = 0; i < len; i++) {
let text = codes[i];
$('#code_list').append(codeHtml(text, ccounter, okays[i]));
$('.delete').click(function () {
$(this).parent().remove();
});
ccounter++;
}
}
$(document).ready(onLoad);
function onLoad() {
$.getJSON('/get_started', {
}, function (data) {
$('#item_list').empty();
$('#pattern_list').empty();
const lst = JSON.parse(data.lst);
const done = JSON.parse(data.done);
const pattern_lst = JSON.parse(data.pattern_lst);
let len = done.length;
for (let i = 0; i < len; i++) {
$('#item_list').append(itemHtml(done[i], lst[i]));
}
len = pattern_lst.length;
for (let i = 0; i < len; i++) {
$('#pattern_list').append('<li>' + pattern_lst[i] + '</li>');
}
$('#path_prefix').text(data.path_prefix);
});
}
function saveAll() {
let path = $('#file_path').text();
let line_no = $('#line_no').text();
let deps = new Array();
for (let i = 0; i < counter; i++) {
if ($('#dep' + i).length) {
deps.push($('#dep' + i).text());
}
}
let codes = new Array();
for (let i = 0; i < ccounter; i++) {
if ($('#code' + i).length) {
codes.push($('#code' + i).text());
}
}
if (path == '' || line_no == '') {
return false;
}
if (deps.length > 0) {
current_item.className = 'list-group-item list-group-item-success';
} else {
current_item.className = 'list-group-item list-group-item-danger';
}
$.getJSON('/save_all', {
label: $(current_item).text(),
deps: JSON.stringify(deps),
codes: JSON.stringify(codes)
});
let target = $(current_item).text().split(':')[2];
let children = $('#item_list')[0].children;
let len = children.length;
for (let i = 0; i < len; i++) {
let tt = children[i].getElementsByTagName('a')[0].innerHTML;
if (tt == $(current_item).text()) {
continue;
}
if (children[i].getElementsByTagName('a')[0].className ==
'list-group-item list-group-item-success' ) {
continue;
}
let content = tt.split(':')[2];
if (content == target) {
moveToTop(i);
}
}
return false;
}
function setBrowsingFile(path) {
$('#browsing_file_path').text(path);
$.getJSON('/get_file', {
path: path
}, function (data) {
$('#browsing_file').children().first().text(data.result);
let obj = $('#browsing_file').children().first();
Prism.highlightElement($('#code')[0]);
});
}
function setHighlightLine(line_no) {
$('#browsing_file').attr('data-line', line_no);
}
function setGotoPatternLine(line_no) {
$('#goto_pattern_line').attr('href', '#browsing_file.' + line_no);
}
function unsetHighlightLine() {
$('#browsing_file').removeAttr('data-line');
// Add this line to ensure that all highlight divs are removed
$('.line-highlight').remove();
}
function removeAnchor() {
// Remove the # from the hash,
// as different browsers may or may not include it
var hash = location.hash.replace('#','');
if (hash != '') {
// Clear the hash in the URL
location.hash = '';
}
};
function setItem(evt) {
removeAnchor();
let item = evt.target;
current_item = item;
let name = $(item).text().split(':');
let file = name[0];
let line_no = name[1];
$('#file_path').text(file);
$('#line_no').text(line_no);
$.getJSON('/load_file', {
path: $(item).text()
}, function (data) {
let deps = JSON.parse(data.deps);
let codes = JSON.parse(data.codes);
let okays = JSON.parse(data.okays);
setTask(deps);
setCode(codes, okays);
});
setBrowsingFile(file);
setHighlightLine(line_no);
setGotoPatternLine(line_no);
$('#selected_text').val('');
$('#code_file_path').val('');
$('#enter_deps').val('');
$('html,body').scrollTop(0);
return false;
}
$('#go_form').submit(function () {
// get all the inputs into an array.
const $inputs = $('#go_form :input');
let values = {};
$inputs.each(function () {
values[this.name] = $(this).val();
});
let path = $('input[name="browsing_path"]').val();
setBrowsingFile(path);
unsetHighlightLine();
return false;
});
$('#add_pattern').submit(function () {
const $inputs = $('#add_pattern :input');
let values = {};
$inputs.each(function () {
values[this.name] = $(this).val();
});
$.getJSON('/add_pattern', {
pattern: values['pattern'],
is_regex: $('#is_regex').is(':checked') ? 1 : 0
});
return true;
});
$('#temporary_search').submit(function() {
const $inputs = $('#temporary_search :input');
let values = {};
$inputs.each(function () {
values[this.name] = $(this).val();
});
$('#modal_title').text(values['pattern']);
$.getJSON('/temporary_search', {
path: $('#file_path').text(),
pattern: values['pattern'],
is_regex: $('#is_regex2').is(':checked') ? 1 : 0
}, function (data) {
$('#modal_body').append(grepResultHtml(JSON.parse(data.result)));
$('#myModal').modal('show');
});
return false;
});
// clear previous html code in modal on hide
$('#myModal').on('hidden.bs.modal', function () {
$('#modal_body').empty();
})
$('#add_deps').submit(enterTask);
$('#add_code').submit(enterCode);
$('#save_all').submit(saveAll);
$('#get_selection').click(getSelText);
})();

View File

@@ -1,308 +0,0 @@
/* http://prismjs.com/download.html?themes=prism-coy&languages=clike+c&plugins=line-highlight+line-numbers */
/**
* prism.js Coy theme for JavaScript, CoffeeScript, CSS and HTML
* Based on https://github.com/tshedor/workshop-wp-theme (Example: http://workshop.kansan.com/category/sessions/basics or http://workshop.timshedor.com/category/sessions/basics);
* @author Tim Shedor
*/
code[class*="language-"],
pre[class*="language-"] {
color: black;
background: none;
font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
text-align: left;
white-space: pre;
word-spacing: normal;
word-break: normal;
word-wrap: normal;
line-height: 1.5;
-moz-tab-size: 4;
-o-tab-size: 4;
tab-size: 4;
-webkit-hyphens: none;
-moz-hyphens: none;
-ms-hyphens: none;
hyphens: none;
}
/* Code blocks */
pre[class*="language-"] {
position: relative;
margin: .5em 0;
box-shadow: -1px 0px 0px 0px #358ccb, 0px 0px 0px 1px #dfdfdf;
border-left: 10px solid #358ccb;
background-color: #fdfdfd;
background-image: linear-gradient(transparent 50%, rgba(69, 142, 209, 0.04) 50%);
background-size: 3em 3em;
background-origin: content-box;
overflow: visible;
padding: 0;
}
code[class*="language"] {
max-height: inherit;
height: 100%;
padding: 0 1em;
display: block;
overflow: auto;
}
/* Margin bottom to accommodate shadow */
:not(pre) > code[class*="language-"],
pre[class*="language-"] {
background-color: #fdfdfd;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
margin-bottom: 1em;
}
/* Inline code */
:not(pre) > code[class*="language-"] {
position: relative;
padding: .2em;
border-radius: 0.3em;
color: #c92c2c;
border: 1px solid rgba(0, 0, 0, 0.1);
display: inline;
white-space: normal;
}
pre[class*="language-"]:before,
pre[class*="language-"]:after {
content: '';
z-index: -2;
display: block;
position: absolute;
bottom: 0.75em;
left: 0.18em;
width: 40%;
height: 20%;
max-height: 13em;
box-shadow: 0px 13px 8px #979797;
-webkit-transform: rotate(-2deg);
-moz-transform: rotate(-2deg);
-ms-transform: rotate(-2deg);
-o-transform: rotate(-2deg);
transform: rotate(-2deg);
}
:not(pre) > code[class*="language-"]:after,
pre[class*="language-"]:after {
right: 0.75em;
left: auto;
-webkit-transform: rotate(2deg);
-moz-transform: rotate(2deg);
-ms-transform: rotate(2deg);
-o-transform: rotate(2deg);
transform: rotate(2deg);
}
.token.comment,
.token.block-comment,
.token.prolog,
.token.doctype,
.token.cdata {
color: #7D8B99;
}
.token.punctuation {
color: #5F6364;
}
.token.property,
.token.tag,
.token.boolean,
.token.number,
.token.function-name,
.token.constant,
.token.symbol,
.token.deleted {
color: #c92c2c;
}
.token.selector,
.token.attr-name,
.token.string,
.token.char,
.token.function,
.token.builtin,
.token.inserted {
color: #2f9c0a;
}
.token.operator,
.token.entity,
.token.url,
.token.variable {
color: #a67f59;
background: rgba(255, 255, 255, 0.5);
}
.token.atrule,
.token.attr-value,
.token.keyword,
.token.class-name {
color: #1990b8;
}
.token.regex,
.token.important {
color: #e90;
}
.language-css .token.string,
.style .token.string {
color: #a67f59;
background: rgba(255, 255, 255, 0.5);
}
.token.important {
font-weight: normal;
}
.token.bold {
font-weight: bold;
}
.token.italic {
font-style: italic;
}
.token.entity {
cursor: help;
}
.namespace {
opacity: .7;
}
@media screen and (max-width: 767px) {
pre[class*="language-"]:before,
pre[class*="language-"]:after {
bottom: 14px;
box-shadow: none;
}
}
/* Plugin styles */
.token.tab:not(:empty):before,
.token.cr:before,
.token.lf:before {
color: #e0d7d1;
}
/* Plugin styles: Line Numbers */
pre[class*="language-"].line-numbers {
padding-left: 0;
}
pre[class*="language-"].line-numbers code {
padding-left: 3.8em;
}
pre[class*="language-"].line-numbers .line-numbers-rows {
left: 0;
}
/* Plugin styles: Line Highlight */
pre[class*="language-"][data-line] {
padding-top: 0;
padding-bottom: 0;
padding-left: 0;
}
pre[data-line] code {
position: relative;
padding-left: 4em;
}
pre .line-highlight {
margin-top: 0;
}
pre[data-line] {
position: relative;
padding: 1em 0 1em 3em;
}
.line-highlight {
position: absolute;
left: 0;
right: 0;
padding: inherit 0;
margin-top: 1em; /* Same as .prism's padding-top */
background: hsla(24, 20%, 50%,.08);
background: linear-gradient(to right, hsla(24, 20%, 50%,.1) 70%, hsla(24, 20%, 50%,0));
pointer-events: none;
line-height: inherit;
white-space: pre;
}
.line-highlight:before,
.line-highlight[data-end]:after {
content: attr(data-start);
position: absolute;
top: .4em;
left: .6em;
min-width: 1em;
padding: 0 .5em;
background-color: hsla(24, 20%, 50%,.4);
color: hsl(24, 20%, 95%);
font: bold 65%/1.5 sans-serif;
text-align: center;
vertical-align: .3em;
border-radius: 999px;
text-shadow: none;
box-shadow: 0 1px white;
}
.line-highlight[data-end]:after {
content: attr(data-end);
top: auto;
bottom: .4em;
}
pre.line-numbers {
position: relative;
padding-left: 3.8em;
counter-reset: linenumber;
}
pre.line-numbers > code {
position: relative;
}
.line-numbers .line-numbers-rows {
position: absolute;
pointer-events: none;
top: 0;
font-size: 100%;
left: -3.8em;
width: 3em; /* works for line-numbers below 1000 lines */
letter-spacing: -1px;
border-right: 1px solid #999;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
.line-numbers-rows > span {
pointer-events: none;
display: block;
counter-increment: linenumber;
}
.line-numbers-rows > span:before {
content: counter(linenumber);
color: #999;
display: block;
padding-right: 0.8em;
text-align: right;
}

View File

@@ -1,824 +0,0 @@
/* http://prismjs.com/download.html?themes=prism-coy&languages=clike+c&plugins=line-highlight+line-numbers */
var _self = (typeof window !== 'undefined')
? window // if in browser
: (
(typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope)
? self // if in worker
: {} // if in node js
);
/**
* Prism: Lightweight, robust, elegant syntax highlighting
* MIT license http://www.opensource.org/licenses/mit-license.php/
* @author Lea Verou http://lea.verou.me
*/
var Prism = (function(){
// Private helper vars
var lang = /\blang(?:uage)?-(\w+)\b/i;
var uniqueId = 0;
var _ = _self.Prism = {
manual: _self.Prism && _self.Prism.manual,
util: {
encode: function (tokens) {
if (tokens instanceof Token) {
return new Token(tokens.type, _.util.encode(tokens.content), tokens.alias);
} else if (_.util.type(tokens) === 'Array') {
return tokens.map(_.util.encode);
} else {
return tokens.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/\u00a0/g, ' ');
}
},
type: function (o) {
return Object.prototype.toString.call(o).match(/\[object (\w+)\]/)[1];
},
objId: function (obj) {
if (!obj['__id']) {
Object.defineProperty(obj, '__id', { value: ++uniqueId });
}
return obj['__id'];
},
// Deep clone a language definition (e.g. to extend it)
clone: function (o) {
var type = _.util.type(o);
switch (type) {
case 'Object':
var clone = {};
for (var key in o) {
if (o.hasOwnProperty(key)) {
clone[key] = _.util.clone(o[key]);
}
}
return clone;
case 'Array':
// Check for existence for IE8
return o.map && o.map(function(v) { return _.util.clone(v); });
}
return o;
}
},
languages: {
extend: function (id, redef) {
var lang = _.util.clone(_.languages[id]);
for (var key in redef) {
lang[key] = redef[key];
}
return lang;
},
/**
* Insert a token before another token in a language literal
* As this needs to recreate the object (we cannot actually insert before keys in object literals),
* we cannot just provide an object, we need an object and a key.
* @param inside The key (or language id) of the parent
* @param before The key to insert before. If not provided, the function appends instead.
* @param insert Object with the key/value pairs to insert
* @param root The object that contains `inside`. If equal to Prism.languages, it can be omitted.
*/
insertBefore: function (inside, before, insert, root) {
root = root || _.languages;
var grammar = root[inside];
if (arguments.length == 2) {
insert = arguments[1];
for (var newToken in insert) {
if (insert.hasOwnProperty(newToken)) {
grammar[newToken] = insert[newToken];
}
}
return grammar;
}
var ret = {};
for (var token in grammar) {
if (grammar.hasOwnProperty(token)) {
if (token == before) {
for (var newToken in insert) {
if (insert.hasOwnProperty(newToken)) {
ret[newToken] = insert[newToken];
}
}
}
ret[token] = grammar[token];
}
}
// Update references in other language definitions
_.languages.DFS(_.languages, function(key, value) {
if (value === root[inside] && key != inside) {
this[key] = ret;
}
});
return root[inside] = ret;
},
// Traverse a language definition with Depth First Search
DFS: function(o, callback, type, visited) {
visited = visited || {};
for (var i in o) {
if (o.hasOwnProperty(i)) {
callback.call(o, i, o[i], type || i);
if (_.util.type(o[i]) === 'Object' && !visited[_.util.objId(o[i])]) {
visited[_.util.objId(o[i])] = true;
_.languages.DFS(o[i], callback, null, visited);
}
else if (_.util.type(o[i]) === 'Array' && !visited[_.util.objId(o[i])]) {
visited[_.util.objId(o[i])] = true;
_.languages.DFS(o[i], callback, i, visited);
}
}
}
}
},
plugins: {},
highlightAll: function(async, callback) {
var env = {
callback: callback,
selector: 'code[class*="language-"], [class*="language-"] code, code[class*="lang-"], [class*="lang-"] code'
};
_.hooks.run("before-highlightall", env);
var elements = env.elements || document.querySelectorAll(env.selector);
for (var i=0, element; element = elements[i++];) {
_.highlightElement(element, async === true, env.callback);
}
},
highlightElement: function(element, async, callback) {
// Find language
var language, grammar, parent = element;
while (parent && !lang.test(parent.className)) {
parent = parent.parentNode;
}
if (parent) {
language = (parent.className.match(lang) || [,''])[1].toLowerCase();
grammar = _.languages[language];
}
// Set language on the element, if not present
element.className = element.className.replace(lang, '').replace(/\s+/g, ' ') + ' language-' + language;
// Set language on the parent, for styling
parent = element.parentNode;
if (/pre/i.test(parent.nodeName)) {
parent.className = parent.className.replace(lang, '').replace(/\s+/g, ' ') + ' language-' + language;
}
var code = element.textContent;
var env = {
element: element,
language: language,
grammar: grammar,
code: code
};
_.hooks.run('before-sanity-check', env);
if (!env.code || !env.grammar) {
if (env.code) {
_.hooks.run('before-highlight', env);
env.element.textContent = env.code;
_.hooks.run('after-highlight', env);
}
_.hooks.run('complete', env);
return;
}
_.hooks.run('before-highlight', env);
if (async && _self.Worker) {
var worker = new Worker(_.filename);
worker.onmessage = function(evt) {
env.highlightedCode = evt.data;
_.hooks.run('before-insert', env);
env.element.innerHTML = env.highlightedCode;
callback && callback.call(env.element);
_.hooks.run('after-highlight', env);
_.hooks.run('complete', env);
};
worker.postMessage(JSON.stringify({
language: env.language,
code: env.code,
immediateClose: true
}));
}
else {
env.highlightedCode = _.highlight(env.code, env.grammar, env.language);
_.hooks.run('before-insert', env);
env.element.innerHTML = env.highlightedCode;
callback && callback.call(element);
_.hooks.run('after-highlight', env);
_.hooks.run('complete', env);
}
},
highlight: function (text, grammar, language) {
var tokens = _.tokenize(text, grammar);
return Token.stringify(_.util.encode(tokens), language);
},
matchGrammar: function (text, strarr, grammar, index, startPos, oneshot, target) {
var Token = _.Token;
for (var token in grammar) {
if(!grammar.hasOwnProperty(token) || !grammar[token]) {
continue;
}
if (token == target) {
return;
}
var patterns = grammar[token];
patterns = (_.util.type(patterns) === "Array") ? patterns : [patterns];
for (var j = 0; j < patterns.length; ++j) {
var pattern = patterns[j],
inside = pattern.inside,
lookbehind = !!pattern.lookbehind,
greedy = !!pattern.greedy,
lookbehindLength = 0,
alias = pattern.alias;
if (greedy && !pattern.pattern.global) {
// Without the global flag, lastIndex won't work
var flags = pattern.pattern.toString().match(/[imuy]*$/)[0];
pattern.pattern = RegExp(pattern.pattern.source, flags + "g");
}
pattern = pattern.pattern || pattern;
// Don't cache length as it changes during the loop
for (var i = index, pos = startPos; i < strarr.length; pos += strarr[i].length, ++i) {
var str = strarr[i];
if (strarr.length > text.length) {
// Something went terribly wrong, ABORT, ABORT!
return;
}
if (str instanceof Token) {
continue;
}
pattern.lastIndex = 0;
var match = pattern.exec(str),
delNum = 1;
// Greedy patterns can override/remove up to two previously matched tokens
if (!match && greedy && i != strarr.length - 1) {
pattern.lastIndex = pos;
match = pattern.exec(text);
if (!match) {
break;
}
var from = match.index + (lookbehind ? match[1].length : 0),
to = match.index + match[0].length,
k = i,
p = pos;
for (var len = strarr.length; k < len && (p < to || (!strarr[k].type && !strarr[k - 1].greedy)); ++k) {
p += strarr[k].length;
// Move the index i to the element in strarr that is closest to from
if (from >= p) {
++i;
pos = p;
}
}
/*
* If strarr[i] is a Token, then the match starts inside another Token, which is invalid
* If strarr[k - 1] is greedy we are in conflict with another greedy pattern
*/
if (strarr[i] instanceof Token || strarr[k - 1].greedy) {
continue;
}
// Number of tokens to delete and replace with the new match
delNum = k - i;
str = text.slice(pos, p);
match.index -= pos;
}
if (!match) {
if (oneshot) {
break;
}
continue;
}
if(lookbehind) {
lookbehindLength = match[1].length;
}
var from = match.index + lookbehindLength,
match = match[0].slice(lookbehindLength),
to = from + match.length,
before = str.slice(0, from),
after = str.slice(to);
var args = [i, delNum];
if (before) {
++i;
pos += before.length;
args.push(before);
}
var wrapped = new Token(token, inside? _.tokenize(match, inside) : match, alias, match, greedy);
args.push(wrapped);
if (after) {
args.push(after);
}
Array.prototype.splice.apply(strarr, args);
if (delNum != 1)
_.matchGrammar(text, strarr, grammar, i, pos, true, token);
if (oneshot)
break;
}
}
}
},
tokenize: function(text, grammar, language) {
var strarr = [text];
var rest = grammar.rest;
if (rest) {
for (var token in rest) {
grammar[token] = rest[token];
}
delete grammar.rest;
}
_.matchGrammar(text, strarr, grammar, 0, 0, false);
return strarr;
},
hooks: {
all: {},
add: function (name, callback) {
var hooks = _.hooks.all;
hooks[name] = hooks[name] || [];
hooks[name].push(callback);
},
run: function (name, env) {
var callbacks = _.hooks.all[name];
if (!callbacks || !callbacks.length) {
return;
}
for (var i=0, callback; callback = callbacks[i++];) {
callback(env);
}
}
}
};
var Token = _.Token = function(type, content, alias, matchedStr, greedy) {
this.type = type;
this.content = content;
this.alias = alias;
// Copy of the full string this token was created from
this.length = (matchedStr || "").length|0;
this.greedy = !!greedy;
};
Token.stringify = function(o, language, parent) {
if (typeof o == 'string') {
return o;
}
if (_.util.type(o) === 'Array') {
return o.map(function(element) {
return Token.stringify(element, language, o);
}).join('');
}
var env = {
type: o.type,
content: Token.stringify(o.content, language, parent),
tag: 'span',
classes: ['token', o.type],
attributes: {},
language: language,
parent: parent
};
if (env.type == 'comment') {
env.attributes['spellcheck'] = 'true';
}
if (o.alias) {
var aliases = _.util.type(o.alias) === 'Array' ? o.alias : [o.alias];
Array.prototype.push.apply(env.classes, aliases);
}
_.hooks.run('wrap', env);
var attributes = Object.keys(env.attributes).map(function(name) {
return name + '="' + (env.attributes[name] || '').replace(/"/g, '&quot;') + '"';
}).join(' ');
return '<' + env.tag + ' class="' + env.classes.join(' ') + '"' + (attributes ? ' ' + attributes : '') + '>' + env.content + '</' + env.tag + '>';
};
if (!_self.document) {
if (!_self.addEventListener) {
// in Node.js
return _self.Prism;
}
// In worker
_self.addEventListener('message', function(evt) {
var message = JSON.parse(evt.data),
lang = message.language,
code = message.code,
immediateClose = message.immediateClose;
_self.postMessage(_.highlight(code, _.languages[lang], lang));
if (immediateClose) {
_self.close();
}
}, false);
return _self.Prism;
}
//Get current script and highlight
var script = document.currentScript || [].slice.call(document.getElementsByTagName("script")).pop();
if (script) {
_.filename = script.src;
if (document.addEventListener && !_.manual && !script.hasAttribute('data-manual')) {
if(document.readyState !== "loading") {
if (window.requestAnimationFrame) {
window.requestAnimationFrame(_.highlightAll);
} else {
window.setTimeout(_.highlightAll, 16);
}
}
else {
document.addEventListener('DOMContentLoaded', _.highlightAll);
}
}
}
return _self.Prism;
})();
if (typeof module !== 'undefined' && module.exports) {
module.exports = Prism;
}
// hack for components to work correctly in node.js
if (typeof global !== 'undefined') {
global.Prism = Prism;
}
;
Prism.languages.clike = {
'comment': [
{
pattern: /(^|[^\\])\/\*[\s\S]*?\*\//,
lookbehind: true
},
{
pattern: /(^|[^\\:])\/\/.*/,
lookbehind: true
}
],
'string': {
pattern: /(["'])(\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,
greedy: true
},
'class-name': {
pattern: /((?:\b(?:class|interface|extends|implements|trait|instanceof|new)\s+)|(?:catch\s+\())[a-z0-9_\.\\]+/i,
lookbehind: true,
inside: {
punctuation: /(\.|\\)/
}
},
'keyword': /\b(if|else|while|do|for|return|in|instanceof|function|new|try|throw|catch|finally|null|break|continue)\b/,
'boolean': /\b(true|false)\b/,
'function': /[a-z0-9_]+(?=\()/i,
'number': /\b-?(?:0x[\da-f]+|\d*\.?\d+(?:e[+-]?\d+)?)\b/i,
'operator': /--?|\+\+?|!=?=?|<=?|>=?|==?=?|&&?|\|\|?|\?|\*|\/|~|\^|%/,
'punctuation': /[{}[\];(),.:]/
};
Prism.languages.c = Prism.languages.extend('clike', {
'keyword': /\b(asm|typeof|inline|auto|break|case|char|const|continue|default|do|double|else|enum|extern|float|for|goto|if|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)\b/,
'operator': /\-[>-]?|\+\+?|!=?|<<?=?|>>?=?|==?|&&?|\|?\||[~^%?*\/]/,
'number': /\b-?(?:0x[\da-f]+|\d*\.?\d+(?:e[+-]?\d+)?)[ful]*\b/i
});
Prism.languages.insertBefore('c', 'string', {
'macro': {
// allow for multiline macro definitions
// spaces after the # character compile fine with gcc
pattern: /(^\s*)#\s*[a-z]+([^\r\n\\]|\\.|\\(?:\r\n?|\n))*/im,
lookbehind: true,
alias: 'property',
inside: {
// highlight the path of the include statement as a string
'string': {
pattern: /(#\s*include\s*)(<.+?>|("|')(\\?.)+?\3)/,
lookbehind: true
},
// highlight macro directives as keywords
'directive': {
pattern: /(#\s*)\b(define|elif|else|endif|error|ifdef|ifndef|if|import|include|line|pragma|undef|using)\b/,
lookbehind: true,
alias: 'keyword'
}
}
},
// highlight predefined macros as constants
'constant': /\b(__FILE__|__LINE__|__DATE__|__TIME__|__TIMESTAMP__|__func__|EOF|NULL|stdin|stdout|stderr)\b/
});
delete Prism.languages.c['class-name'];
delete Prism.languages.c['boolean'];
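// Line-highlight plugin: draws highlight overlays for the line ranges listed
// in a <pre data-line="..."> attribute or encoded in the URL hash.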
(function(){
if (typeof self === 'undefined' || !self.Prism || !self.document || !document.querySelector) {
return;
}
function $$(expr, con) {
return Array.prototype.slice.call((con || document).querySelectorAll(expr));
}
function hasClass(element, className) {
className = " " + className + " ";
return (" " + element.className + " ").replace(/[\n\t]/g, " ").indexOf(className) > -1
}
// Some browsers round the line-height, others don't.
// We need to test for it to position the elements properly.
var isLineHeightRounded = (function() {
var res;
return function() {
if(typeof res === 'undefined') {
var d = document.createElement('div');
d.style.fontSize = '13px';
d.style.lineHeight = '1.5';
d.style.padding = 0;
d.style.border = 0;
d.innerHTML = '&nbsp;<br />&nbsp;';
document.body.appendChild(d);
// Browsers that round the line-height should have offsetHeight === 38
// The others should have 39.
res = d.offsetHeight === 38;
document.body.removeChild(d);
}
return res;
}
}());
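// Return the vertical offset of the element with the given id, measured from
// the top of <body>.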
function getOffsetById(id) {
var element = document.getElementById(id);
var bodyRect = document.body.getBoundingClientRect();
var elemRect = element.getBoundingClientRect();
var elementOffset = elemRect.top - bodyRect.top;
return elementOffset;
}
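// Create a .line-highlight overlay for each comma-separated line range in
// `lines` (e.g. "3,7-9") and position it over the corresponding lines of the <pre>.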
function highlightLines(pre, lines, classes) {
var ranges = lines.replace(/\s+/g, '').split(',');
var offset = getOffsetById('browsing_file');
var parseMethod = isLineHeightRounded() ? parseInt : parseFloat;
var lineHeight = parseMethod(getComputedStyle(pre).lineHeight);
for (var i=0, range; range = ranges[i++];) {
range = range.split('-');
var start = +range[0],
end = +range[1] || start;
var line = document.createElement('div');
line.textContent = Array(end - start + 2).join(' \n');
line.setAttribute('aria-hidden', 'true');
line.className = (classes || '') + ' line-highlight';
//if the line-numbers plugin is enabled, then there is no reason for this plugin to display the line numbers
if(!hasClass(pre, 'line-numbers')) {
line.setAttribute('data-start', start);
if(end > start) {
line.setAttribute('data-end', end);
}
}
line.style.top = (getOffsetById('line_no' + start) - offset) + 'px';
//allow this to play nicely with the line-numbers plugin
if(hasClass(pre, 'line-numbers')) {
// Need to attach to the <pre>: when line-numbers is enabled, the <code> tag is positioned relatively, which breaks the positioning.
pre.appendChild(line);
} else {
(pre.querySelector('code') || pre).appendChild(line);
}
}
}
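// Apply a temporary highlight to the line range encoded in the URL hash
// (e.g. #id.10-13) and scroll it into view.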
function applyHash() {
var hash = location.hash.slice(1);
// Remove pre-existing temporary lines
$$('.temporary.line-highlight').forEach(function (line) {
line.parentNode.removeChild(line);
});
var range = (hash.match(/\.([\d,-]+)$/) || [,''])[1];
if (!range || document.getElementById(hash)) {
return;
}
var id = hash.slice(0, hash.lastIndexOf('.')),
pre = document.getElementById(id);
if (!pre) {
return;
}
if (!pre.hasAttribute('data-line')) {
pre.setAttribute('data-line', '');
}
highlightLines(pre, range, 'temporary ');
document.querySelector('.temporary.line-highlight').scrollIntoView();
}
var fakeTimer = 0; // Hack to limit the number of times applyHash() runs
Prism.hooks.add('before-sanity-check', function(env) {
var pre = env.element.parentNode;
var lines = pre && pre.getAttribute('data-line');
if (!pre || !lines || !/pre/i.test(pre.nodeName)) {
return;
}
/*
* Cleanup for other plugins (e.g. autoloader).
*
* Sometimes <code> blocks are highlighted multiple times. It is necessary
* to clean up any left-over tags, because the whitespace inside the <div>
* tags changes the content of the <code> tag.
*/
var num = 0;
$$('.line-highlight', pre).forEach(function (line) {
num += line.textContent.length;
line.parentNode.removeChild(line);
});
// Remove extra whitespace
if (num && /^( \n)+$/.test(env.code.slice(-num))) {
env.code = env.code.slice(0, -num);
}
});
Prism.hooks.add('complete', function (env) {
if (!env.code) {
return;
}
// works only for <code> wrapped inside <pre> (not inline)
var pre = env.element.parentNode;
var clsReg = /\s*\bline-numbers\b\s*/;
if (
!pre || !/pre/i.test(pre.nodeName) ||
// Abort only if neither the <pre> nor the <code> has the class
(!clsReg.test(pre.className) && !clsReg.test(env.element.className))
) {
return;
}
if (env.element.querySelector(".line-numbers-rows")) {
// Abort if line numbers already exist
return;
}
if (clsReg.test(env.element.className)) {
// Remove the class "line-numbers" from the <code>
env.element.className = env.element.className.replace(clsReg, '');
}
if (!clsReg.test(pre.className)) {
// Add the class "line-numbers" to the <pre>
pre.className += ' line-numbers';
}
var match = env.code.match(/\n(?!$)/g);
var linesNum = match ? match.length + 1 : 1;
var lineNumbersWrapper;
var lines = '';
for (let i = 1; i < linesNum + 1; i++) {
lines += '<span id="line_no' + i + '"></span>';
}
lineNumbersWrapper = document.createElement('span');
lineNumbersWrapper.setAttribute('aria-hidden', 'true');
lineNumbersWrapper.className = 'line-numbers-rows';
lineNumbersWrapper.innerHTML = lines;
if (pre.hasAttribute('data-start')) {
pre.style.counterReset = 'linenumber ' + (parseInt(pre.getAttribute('data-start'), 10) - 1);
}
env.element.appendChild(lineNumbersWrapper);
});
Prism.hooks.add('complete', function(env) {
var pre = env.element.parentNode;
var lines = pre && pre.getAttribute('data-line');
if (!pre || !lines || !/pre/i.test(pre.nodeName)) {
return;
}
clearTimeout(fakeTimer);
highlightLines(pre, lines);
fakeTimer = setTimeout(applyHash, 1);
});
if(window.addEventListener) {
window.addEventListener('hashchange', applyHash);
}
})();
(function() {
if (typeof self === 'undefined' || !self.Prism || !self.document) {
return;
}
}());

View File

@@ -1,111 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Source Deps Reviewer</title>
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"/>
<link rel="stylesheet" href="static/prism/css/prism.css"/>
<link rel="stylesheet" href="static/css/main.css"/>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
<!-- Added for sortable list -->
<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
</head>
<body>
<div class="container-fluid">
<div class="row content">
<h2 style="padding-left:20px;">Code review tool</h2>
<ol id="item_list" class="col-sm-3"></ol>
<div class="col-sm-5">
<h3>Browsing:</h3>
<pre><h4 id="browsing_file_path"></h4></pre><br>
<form id="go_form">
<div class="input-group" style="margin-bottom:10px;">
<span class="input-group-addon" id="path_prefix"></span>
<input type="text" class="form-control" name="browsing_path" placeholder="Enter file path here" aria-describedby="path_prefix">
</div>
<input class="btn btn-primary" type="submit" name="go" value="GO"/>
<a class="btn btn-link" id="goto_pattern_line">goto pattern line</a>
<pre id="browsing_file" class="line-numbers"><code id="code" class="language-C" style="display:inline-block;"></code></pre>
</form>
</div>
<br>
<div class="col-sm-4" data-spy="affix">
<div class="well">
<h3>Temporary search</h3>
<form id="temporary_search" class="input-group" style="padding-left:20px;">
<span class="input-group-addon">is regex</span>
<span class="input-group-addon">
<input type="checkbox" name="is_regex2" id="is_regex2">
</span>
<input type="text" name="pattern" class="form-control">
</form>
<h3>Add patterns to grep</h3>
<form id="add_pattern" class="input-group" style="padding-left:20px;">
<span class="input-group-addon">is regex</span>
<span class="input-group-addon">
<input type="checkbox" name="is_regex" id="is_regex">
</span>
<input type="text" name="pattern" class="form-control">
</form>
<ul id="pattern_list"></ul>
</div>
<div class="well">
<h3>File labeling:</h3>
<pre style="padding-left:20px;"><h4 id="file_path"></h4></pre>
<h3>Pattern line number:</h3>
<h3 id="line_no"></h3><br>
<h3>Library Dependencies</h3>
<form id="add_deps" class="input-group">
<input type="text" class="form-control" id="enter_deps" placeholder="Fill in * if undetermined"/>
<span class="input-group-btn">
<input class="btn btn-secondary" type="submit" value="Add"/>
</span>
</form>
<ul id="deps_list"></ul>
<h3>Code Dependencies</h3>
<form id="add_code">
<input class="btn btn-secondary" type="button" id="get_selection" value="Get selection"/>
<input class="btn btn-secondary" type="submit" id="add_code" value="Add"/><br>
<input type="text" id="code_file_path" style="margin: 0px; width: 100%;"/>
<textarea id="selected_text" name="selectedtext" rows="5" style="margin: 0px; width: 100%; height: 106px;"></textarea>
</form>
<ul id="code_list"></ul>
<form id="save_all">
<input class="btn btn-secondary" type="submit" value="Save All"/>
</form>
</div>
</div>
</div>
</div>
<!-- Modal -->
<div class="modal fade" id="myModal" role="dialog">
<div class="modal-dialog modal-xl">
<!-- Modal content-->
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal">&times;</button>
<h4 id="modal_title" class="modal-title"></h4>
</div>
<div id="modal_body" class="modal-body">
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
<script type="text/javascript" src="static/js/main.js"></script>
<!-- for code prettify -->
<script src="static/prism/js/prism.js"></script>
</body>
</html>

View File

@@ -1,69 +0,0 @@
#!/usr/bin/env python3

import os
import unittest

from sourcedr.map import (
    link_build_dep_and_review_data, load_build_dep_file_from_path,
    load_build_dep_ninja, load_review_data)


TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')


class MapTest(unittest.TestCase):
    MAP_TESTDATA_DIR = os.path.join(TESTDATA_DIR, 'map')
    DEP_PATH = os.path.join(MAP_TESTDATA_DIR, 'build_dep.json')
    REVIEW_DB_PATH = os.path.join(MAP_TESTDATA_DIR, 'data.json')
    NINJA_PATH = os.path.join(MAP_TESTDATA_DIR, 'build.ninja')
    NINJA_DEP_PATH = os.path.join(MAP_TESTDATA_DIR, 'ninja_deps')

    def test_load_build_dep_file(self):
        dep = load_build_dep_file_from_path(self.DEP_PATH)
        self.assertIn('liba.so', dep)
        self.assertIn('libb.so', dep)
        self.assertIn('libc.so', dep)
        self.assertSetEqual({'a.h', 'a1.c', 'a1.o', 'a2.c', 'a2.o'},
                            dep['liba.so'])
        self.assertSetEqual({'a.h', 'b.c', 'b.o'}, dep['libb.so'])
        self.assertSetEqual({'c.c', 'c.o'}, dep['libc.so'])

    def test_load_build_dep_ninja(self):
        dep = load_build_dep_ninja(self.NINJA_PATH, self.MAP_TESTDATA_DIR,
                                   self.NINJA_DEP_PATH)
        self.assertIn('liba.so', dep)
        self.assertIn('libb.so', dep)
        self.assertIn('libc.so', dep)
        self.assertSetEqual({'a.h', 'a1.c', 'a1.o', 'a2.c', 'a2.o'},
                            dep['liba.so'])
        self.assertSetEqual({'a.h', 'b.c', 'b.o'}, dep['libb.so'])
        self.assertSetEqual({'c.c', 'c.o'}, dep['libc.so'])

    def test_load_review_data(self):
        data = load_review_data(self.REVIEW_DB_PATH)
        self.assertIn('a.h', data)
        self.assertEqual(['libx.so'], data['a.h'])

    def test_link_build_dep_and_review_data(self):
        dep = load_build_dep_file_from_path(self.DEP_PATH)
        data = load_review_data(self.REVIEW_DB_PATH)
        result = link_build_dep_and_review_data(dep, data)
        self.assertIn('liba.so', result)
        self.assertIn('libb.so', result)
        self.assertIn('libc.so', result)
        self.assertEqual(['libx.so'], result['liba.so'])
        self.assertEqual(['libx.so'], result['libb.so'])


if __name__ == '__main__':
    unittest.main()

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env python3

import os
import tempfile
import unittest

from sourcedr.project import Config


TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')


class ConfigTest(unittest.TestCase):
    PROJECT_DIR = os.path.join(TESTDATA_DIR, 'project')
    CONFIG_PATH = os.path.join(PROJECT_DIR, Config.DEFAULT_NAME)

    def test_load(self):
        config = Config(self.CONFIG_PATH)
        config.load()
        self.assertEqual('path/to/android/src', config.source_dir)

    def test_save(self):
        with tempfile.TemporaryDirectory(prefix='test_sourcedr_') as tmp_dir:
            config_path = Config.get_default_path(tmp_dir)
            config = Config(config_path)
            config.source_dir = 'path/to/android/src'
            config.save()
            with open(config_path, 'r') as actual_fp:
                actual = actual_fp.read().strip()
            with open(self.CONFIG_PATH, 'r') as expected_fp:
                expected = expected_fp.read().strip()
            self.assertEqual(actual, expected)


if __name__ == '__main__':
    unittest.main()

View File

@@ -1,34 +0,0 @@
#!/usr/bin/env python3

import os
import unittest

from sourcedr.codesearch import CodeSearch
from sourcedr.review_db import ReviewDB


TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
ANDROID_DIR = os.path.join(TESTDATA_DIR, 'android_src')


class ReviewDBTest(unittest.TestCase):
    def setUp(self):
        self.csearch_index_path = 'csearchindex'
        self.review_db_path = ReviewDB.DEFAULT_NAME

    def tearDown(self):
        os.remove(self.csearch_index_path)
        os.remove(self.review_db_path)

    def test_preprocess(self):
        codesearch = CodeSearch(ANDROID_DIR, self.csearch_index_path)
        codesearch.build_index()
        review_db = ReviewDB(ReviewDB.DEFAULT_NAME, codesearch)
        review_db.find(patterns=['dlopen'], is_regexs=[False])
        self.assertTrue(os.path.exists(ReviewDB.DEFAULT_NAME))


if __name__ == '__main__':
    unittest.main()

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env python3

import json
import os
import tempfile
import unittest

import flask_testing

from sourcedr.project import Project
from sourcedr.review_db import ReviewDB
from sourcedr.server import create_app


TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
ANDROID_DIR = os.path.join(TESTDATA_DIR, 'android_src')


class ViewTest(flask_testing.TestCase):
    def create_app(self):
        self.tmp_dir = tempfile.TemporaryDirectory(prefix='test_sourcedr_')
        project = Project.get_or_create_project_dir(
            self.tmp_dir.name, ANDROID_DIR)
        project.update_csearch_index(True)
        self.project = project

        app = create_app(project)
        app.config['TESTING'] = True
        self.app = app
        return app

    def setUp(self):
        review_db = self.project.review_db
        review_db.find(patterns=['dlopen'], is_regexs=[False])

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_get_file(self):
        test_arg = 'example.c'
        response = self.client.get('/get_file',
                                   query_string=dict(path=test_arg))
        ret = response.json['result']
        with open(os.path.join(ANDROID_DIR, test_arg), 'r') as f:
            self.assertEqual(ret, f.read())

    def test_load_file(self):
        test_arg = 'dlopen/test.c'
        test_arg += ':10: handle = dlopen("libm.so.6", RTLD_LAZY);'
        response = self.client.get('/load_file',
                                   query_string=dict(path=test_arg))
        deps = json.loads(response.json['deps'])
        codes = json.loads(response.json['codes'])
        with open(self.project.review_db.path, 'r') as f:
            cdata = json.load(f)
        self.assertEqual(deps, cdata[test_arg][0])
        self.assertEqual(codes, cdata[test_arg][1])

    def test_save_all(self):
        label = os.path.abspath('sourcedr/test/dlopen/test.c')
        label += ':10: handle = dlopen("libm.so.6", RTLD_LAZY);'
        test_arg = {
            'label': label,
            'deps': json.dumps(['this_is_a_test.so']),
            'codes': json.dumps(['arr_0', 'arr_1'])
        }
        response = self.client.get('/save_all', query_string=test_arg)
        cdata = ReviewDB(self.project.review_db.path, None).data
        self.assertEqual(['this_is_a_test.so'], cdata[test_arg['label']][0])
        self.assertEqual(['arr_0', 'arr_1'], cdata[test_arg['label']][1])


if __name__ == '__main__':
    unittest.main()

View File

@@ -1,15 +0,0 @@
rule cc
  command = gcc -c -o $out $in -MMD -MF $out.d
  deps = gcc
  depfile = $out.d

rule ld
  command = gcc -o $out $in

build example.o: cc example.c
build example.so: ld example.o

build dlopen/test.o: cc dlopen/test.c
build dlopen/test.so: ld dlopen/test.o

View File

@@ -1,24 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <dlfcn.h>

int main(int argc, char **argv) {
    void *handle;
    double (*cosine)(double);
    char *error;

    handle = dlopen("libm.so.6", RTLD_LAZY);
    if (!handle) {
        fputs (dlerror(), stderr);
        exit(1);
    }

    cosine = dlsym(handle, "cos");
    if ((error = dlerror()) != NULL) {
        fputs(error, stderr);
        exit(1);
    }

    printf ("%f\n", (*cosine)(2.0));
    dlclose(handle);
}

View File

@@ -1,10 +0,0 @@
int main() {
    printf("This is a simple testing file\n");
    int dlopen_analysis = 1;
    "This line with dlopen shouldn't be found"
    /*
     * This dlopen shouldn't be found
     */
    dlopen("dlopen");
    handle = dlopen("libm.so.6", RTLD_LAZY);
}

View File

@@ -1 +0,0 @@
dlopen() in .txt file should not be matched

View File

@@ -1,21 +0,0 @@
rule cc
  command = gcc -c -o $out $in -MMD -MF $out.d
  deps = gcc
  depfile = $out.d

rule ld
  command = gcc -o $out $in

build liba.so: ld a1.o a2.o
build libb.so: ld b.o
build libc.so: ld c.o

build a1.o: cc a1.c
build a2.o: cc a2.c
build b.o: cc b.c
build c.o: cc c.c

View File

@@ -1,9 +0,0 @@
{
    "liba.so": ["libb.so", "libc.so", "a1.o", "a2.o"],
    "libb.so": ["b.o"],
    "libc.so": ["c.o"],
    "a1.o": ["a.h", "a1.c"],
    "a2.o": ["a.h", "a2.c"],
    "b.o": ["a.h", "b.c"],
    "c.o": ["c.c"]
}

View File

@@ -1,6 +0,0 @@
{
    "a.h:2:dlopen(\"libx.so\",": [
        ["libx.so"],
        ["a.h:2:dlopen(\"libx.so\","]
    ]
}

View File

@@ -1,6 +0,0 @@
{
    "source_dir": "path/to/android/src",
    "file_ext_blacklist": [],
    "file_name_blacklist": [],
    "path_component_blacklist": []
}

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env python3
"""Utility functions or classes."""

import os


class LockedFile(object):  # pylint: disable=too-few-public-methods
    """Open a `.lock` file and rename it to the real path if everything goes
    well."""

    def __init__(self, path, mode):
        assert 'x' in mode
        self._path = path
        self._mode = mode
        self._fp = None

    def __enter__(self):
        """Open the file at the specified path with the specified mode."""
        self._fp = open(self._get_locked_path(self._path), self._mode)
        return self._fp

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the file object and rename the file if there are no
        exceptions."""
        self._fp.close()
        self._fp = None
        if exc_val is None:
            os.rename(self._get_locked_path(self._path), self._path)

    @classmethod
    def _get_locked_path(cls, path):
        """Get the file path for the `.lock` file."""
        return path + '.lock'

    @classmethod
    def is_locked(cls, path):
        """Check whether a path is locked."""
        return os.path.exists(cls._get_locked_path(path))
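

# Usage sketch (illustrative, not part of the original module): LockedFile
# writes through '<path>.lock' and renames it to '<path>' only when the
# `with` block exits without an exception.  The file name 'data.json' below
# is a hypothetical example.
if __name__ == '__main__':
    with LockedFile('data.json', 'x') as lock_fp:
        lock_fp.write('{}')  # actually written to 'data.json.lock'
    # A clean exit renames 'data.json.lock' to 'data.json'.
    print(LockedFile.is_locked('data.json'))  # False once the rename succeeded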