slither.core.slither_core

Main module

  1"""
  2    Main module
  3"""
  4import json
  5import logging
  6import os
  7import pathlib
  8import posixpath
  9import re
 10from collections import defaultdict
 11from typing import Optional, Dict, List, Set, Union, Tuple, TypeVar
 12
 13from crytic_compile import CryticCompile
 14from crytic_compile.utils.naming import Filename
 15
 16from slither.core.declarations.contract_level import ContractLevel
 17from slither.core.compilation_unit import SlitherCompilationUnit
 18from slither.core.context.context import Context
 19from slither.core.declarations import Contract, FunctionContract
 20from slither.core.declarations.top_level import TopLevel
 21from slither.core.source_mapping.source_mapping import SourceMapping, Source
 22from slither.slithir.variables import Constant
 23from slither.utils.colors import red
 24from slither.utils.sarif import read_triage_info
 25from slither.utils.source_mapping import get_definition, get_references, get_all_implementations
 26
 27logger = logging.getLogger("Slither")
 28logging.basicConfig()
 29
 30
 31def _relative_path_format(path: str) -> str:
 32    """
 33    Strip relative paths of "." and ".."
 34    """
 35    return path.split("..")[-1].strip(".").strip("/")
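
A quick illustration of the helper above (illustrative calls, not part of the module); the returned fragment is later used as a regular expression in valid_result:

    _relative_path_format("../../src/Token.sol")  # -> "src/Token.sol"
    _relative_path_format("./test/MyTest.sol")    # -> "test/MyTest.sol"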
 36
 37
 38# pylint: disable=too-many-instance-attributes,too-many-public-methods
 39class SlitherCore(Context):
 40    """
 41    Slither static analyzer
 42    """
 43
 44    def __init__(self) -> None:
 45        super().__init__()
 46
 47        self._filename: Optional[str] = None
 48        self._raw_source_code: Dict[str, str] = {}
 49        self._source_code_to_line: Optional[Dict[str, List[str]]] = None
 50
 51        self._previous_results_filename: str = "slither.db.json"
 52
 53        # TODO: add cli flag to set these variables
 54        self.sarif_input: str = "export.sarif"
 55        self.sarif_triage: str = "export.sarif.sarifexplorer"
 56        self._results_to_hide: List = []
 57        self._previous_results: List = []
 58        # From triaged result
 59        self._previous_results_ids: Set[str] = set()
 60        # Every Slither object has a list of results from detectors
 61        # Because of multiple-compilation support, the same result may be
 62        # analyzed several times, so we remove duplicates
 63        self._currently_seen_results: Set[str] = set()
 64        self._paths_to_filter: Set[str] = set()
 65        self._paths_to_include: Set[str] = set()
 66
 67        self._crytic_compile: Optional[CryticCompile] = None
 68
 69        self._generate_patches = False
 70        self._exclude_dependencies = False
 71
 72        self._markdown_root = ""
 73
 74        # If set to true, slither will not catch errors during parsing
 75        self._disallow_partial: bool = False
 76        self._skip_assembly: bool = False
 77
 78        self._show_ignored_findings = False
 79
 80        # Maps from file to detector name to the start/end ranges for that detector.
 81        # Infinity is used to signal a detector has no end range.
 82        self._ignore_ranges: Dict[str, Dict[str, List[Tuple[int, ...]]]] = defaultdict(
 83            lambda: defaultdict(lambda: [(-1, -1)])
 84        )
 85
 86        self._compilation_units: List[SlitherCompilationUnit] = []
 87
 88        self._contracts: List[Contract] = []
 89        self._contracts_derived: List[Contract] = []
 90
 91        self._offset_to_min_offset: Optional[Dict[Filename, Dict[int, Set[int]]]] = None
 92        self._offset_to_objects: Optional[Dict[Filename, Dict[int, Set[SourceMapping]]]] = None
 93        self._offset_to_references: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None
 94        self._offset_to_implementations: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None
 95        self._offset_to_definitions: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None
 96
 97        # Line prefix is used during the source mapping generation
 98        # By default we generate file.sol#1
 99        # But we allow altering this (e.g. file.sol:1) for VS Code integration
100        self.line_prefix: str = "#"
101
102        # Used by the echidna printer
103        # If true, partial analysis is allowed
104        self.no_fail = False
105
106        self.skip_data_dependency = False
107
108    @property
109    def compilation_units(self) -> List[SlitherCompilationUnit]:
110        return list(self._compilation_units)
111
112    def add_compilation_unit(self, compilation_unit: SlitherCompilationUnit):
113        self._compilation_units.append(compilation_unit)
114
115    # endregion
116    ###################################################################################
117    ###################################################################################
118    # region Contracts
119    ###################################################################################
120    ###################################################################################
121
122    @property
123    def contracts(self) -> List[Contract]:
124        if not self._contracts:
125            all_contracts = [
126                compilation_unit.contracts for compilation_unit in self._compilation_units
127            ]
128            self._contracts = [item for sublist in all_contracts for item in sublist]
129        return self._contracts
130
131    @property
132    def contracts_derived(self) -> List[Contract]:
133        if not self._contracts_derived:
134            all_contracts = [
135                compilation_unit.contracts_derived for compilation_unit in self._compilation_units
136            ]
137            self._contracts_derived = [item for sublist in all_contracts for item in sublist]
138        return self._contracts_derived
139
140    def get_contract_from_name(self, contract_name: Union[str, Constant]) -> List[Contract]:
141        """
142            Return the contracts matching a name
143        Args:
144            contract_name (str): name of the contract
145        Returns:
146            List[Contract]: the contracts matching the name (possibly empty)
147        """
148        contracts = []
149        for compilation_unit in self._compilation_units:
150            contracts += compilation_unit.get_contract_from_name(contract_name)
151        return contracts
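
Usage sketch for the lookup above (the target file name is hypothetical; the Slither entry point in slither.slither subclasses SlitherCore):

    from slither.slither import Slither

    sl = Slither("MyToken.sol")  # hypothetical target
    for contract in sl.get_contract_from_name("MyToken"):
        print(contract.name, len(contract.functions))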
152
153    ###################################################################################
154    ###################################################################################
155    # region Source code
156    ###################################################################################
157    ###################################################################################
158
159    @property
160    def source_code(self) -> Dict[str, str]:
161        """{filename: source_code (str)}: source code"""
162        return self._raw_source_code
163
164    @property
165    def filename(self) -> Optional[str]:
166        """str: Filename."""
167        return self._filename
168
169    @filename.setter
170    def filename(self, filename: str):
171        self._filename = filename
172
173    def add_source_code(self, path: str) -> None:
174        """
175        :param path: path of the source file to register
176        :return: None
177        """
178        if self.crytic_compile and path in self.crytic_compile.src_content:
179            self.source_code[path] = self.crytic_compile.src_content[path]
180        else:
181            with open(path, encoding="utf8", newline="") as f:
182                self.source_code[path] = f.read()
183
184        self.parse_ignore_comments(path)
185
186    @property
187    def markdown_root(self) -> str:
188        return self._markdown_root
189
190    def print_functions(self, d: str):
191        """
192        Export all the functions to dot files
193        """
194        for compilation_unit in self._compilation_units:
195            for c in compilation_unit.contracts:
196                for f in c.functions:
197                    f.cfg_to_dot(os.path.join(d, f"{c.name}.{f.name}.dot"))
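
Usage sketch: dump one Graphviz file per function into an existing directory, reusing sl from the sketch above (the directory name is hypothetical):

    import os

    os.makedirs("cfg-out", exist_ok=True)
    sl.print_functions("cfg-out")  # writes <Contract>.<function>.dot files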
198
199    def _compute_offsets_from_thing(self, thing: SourceMapping):
200        definition = get_definition(thing, self.crytic_compile)
201        references = get_references(thing)
202        implementations = get_all_implementations(thing, self.contracts)
203
204        # Create the offset mapping
205        for offset in range(definition.start, definition.end + 1):
206            self._offset_to_min_offset[definition.filename][offset].add(definition.start)
207
208        is_declared_function = (
209            isinstance(thing, FunctionContract) and thing.contract_declarer == thing.contract
210        )
211
212        should_add_to_objects = (
213            isinstance(thing, (TopLevel, Contract))
214            or is_declared_function
215            or (isinstance(thing, ContractLevel) and not isinstance(thing, FunctionContract))
216        )
217
218        if should_add_to_objects:
219            self._offset_to_objects[definition.filename][definition.start].add(thing)
220
221        self._offset_to_definitions[definition.filename][definition.start].add(definition)
222        self._offset_to_implementations[definition.filename][definition.start].update(
223            implementations
224        )
225        self._offset_to_references[definition.filename][definition.start] |= set(references)
226
227        # For references
228        should_add_to_objects = (
229            isinstance(thing, TopLevel)
230            or is_declared_function
231            or (isinstance(thing, ContractLevel) and not isinstance(thing, FunctionContract))
232        )
233
234        for ref in references:
235            for offset in range(ref.start, ref.end + 1):
236                self._offset_to_min_offset[definition.filename][offset].add(ref.start)
237
238            if should_add_to_objects:
239                self._offset_to_objects[definition.filename][ref.start].add(thing)
240
241            if is_declared_function:
242                # Only show the nearest lexical definition for declared contract-level functions
243                if (
244                    thing.contract.source_mapping.start
245                    < ref.start
246                    < thing.contract.source_mapping.end
247                ):
248
249                    self._offset_to_definitions[ref.filename][ref.start].add(definition)
250
251            else:
252                self._offset_to_definitions[ref.filename][ref.start].add(definition)
253
254            self._offset_to_implementations[ref.filename][ref.start].update(implementations)
255            self._offset_to_references[ref.filename][ref.start] |= set(references)
256
257    def _compute_offsets_to_ref_impl_decl(self):  # pylint: disable=too-many-branches
258        self._offset_to_references = defaultdict(lambda: defaultdict(lambda: set()))
259        self._offset_to_definitions = defaultdict(lambda: defaultdict(lambda: set()))
260        self._offset_to_implementations = defaultdict(lambda: defaultdict(lambda: set()))
261        self._offset_to_objects = defaultdict(lambda: defaultdict(lambda: set()))
262        self._offset_to_min_offset = defaultdict(lambda: defaultdict(lambda: set()))
263
264        for compilation_unit in self._compilation_units:
265            for contract in compilation_unit.contracts:
266                self._compute_offsets_from_thing(contract)
267
268                for function in contract.functions_declared:
269                    self._compute_offsets_from_thing(function)
270                    for variable in function.local_variables:
271                        self._compute_offsets_from_thing(variable)
272                for modifier in contract.modifiers_declared:
273                    self._compute_offsets_from_thing(modifier)
274                    for variable in modifier.local_variables:
275                        self._compute_offsets_from_thing(variable)
276
277                for var in contract.state_variables:
278                    self._compute_offsets_from_thing(var)
279
280                for st in contract.structures:
281                    self._compute_offsets_from_thing(st)
282
283                for enum in contract.enums:
284                    self._compute_offsets_from_thing(enum)
285
286                for event in contract.events:
287                    self._compute_offsets_from_thing(event)
288
289                for typ in contract.type_aliases:
290                    self._compute_offsets_from_thing(typ)
291
292            for enum in compilation_unit.enums_top_level:
293                self._compute_offsets_from_thing(enum)
294            for event in compilation_unit.events_top_level:
295                self._compute_offsets_from_thing(event)
296            for function in compilation_unit.functions_top_level:
297                self._compute_offsets_from_thing(function)
298            for st in compilation_unit.structures_top_level:
299                self._compute_offsets_from_thing(st)
300            for var in compilation_unit.variables_top_level:
301                self._compute_offsets_from_thing(var)
302            for typ in compilation_unit.type_aliases.values():
303                self._compute_offsets_from_thing(typ)
304            for err in compilation_unit.custom_errors:
305                self._compute_offsets_from_thing(err)
306            for event in compilation_unit.events_top_level:
307                self._compute_offsets_from_thing(event)
308            for import_directive in compilation_unit.import_directives:
309                self._compute_offsets_from_thing(import_directive)
310            for pragma in compilation_unit.pragma_directives:
311                self._compute_offsets_from_thing(pragma)
312
313    T = TypeVar("T", Source, SourceMapping)
314
315    def _get_offset(
316        self, mapping: Dict[Filename, Dict[int, Set[T]]], filename_str: str, offset: int
317    ) -> Set[T]:
318        """Get the Source/SourceMapping referenced by the offset.
319
320        For performance reasons, references are only stored once at the lowest offset.
321        It uses the _offset_to_min_offset mapping to retrieve the correct offsets.
322        As multiple definitions can be related to the same offset, we retrieve all of them.
323
324        :param mapping: Mapping to search (objects, references, ...)
325        :param filename_str: Filename to consider
326        :param offset: Look-up offset
327        :raises IndexError: When the start offset is not found
328        :return: The corresponding set of Source/SourceMapping
329        """
330        filename: Filename = self.crytic_compile.filename_lookup(filename_str)
331
332        start_offsets = self._offset_to_min_offset[filename][offset]
333        if not start_offsets:
334            msg = f"Unable to find reference for offset {offset}"
335            raise IndexError(msg)
336
337        results = set()
338        for start_offset in start_offsets:
339            results |= mapping[filename][start_offset]
340
341        return results
342
343    def offset_to_references(self, filename_str: str, offset: int) -> Set[Source]:
344        if self._offset_to_references is None:
345            self._compute_offsets_to_ref_impl_decl()
346
347        return self._get_offset(self._offset_to_references, filename_str, offset)
348
349    def offset_to_implementations(self, filename_str: str, offset: int) -> Set[Source]:
350        if self._offset_to_implementations is None:
351            self._compute_offsets_to_ref_impl_decl()
352
353        return self._get_offset(self._offset_to_implementations, filename_str, offset)
354
355    def offset_to_definitions(self, filename_str: str, offset: int) -> Set[Source]:
356        if self._offset_to_definitions is None:
357            self._compute_offsets_to_ref_impl_decl()
358
359        return self._get_offset(self._offset_to_definitions, filename_str, offset)
360
361    def offset_to_objects(self, filename_str: str, offset: int) -> Set[SourceMapping]:
362        if self._offset_to_objects is None:
363            self._compute_offsets_to_ref_impl_decl()
364
365        return self._get_offset(self._offset_to_objects, filename_str, offset)
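
Usage sketch for the offset lookups above, the primitive behind IDE-style go-to-definition/references; sl is from the earlier sketch, and the file name and byte offset are hypothetical:

    for obj in sl.offset_to_objects("MyToken.sol", 120):
        print(type(obj).__name__, obj.source_mapping)
    definitions = sl.offset_to_definitions("MyToken.sol", 120)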
366
367    # endregion
368    ###################################################################################
369    ###################################################################################
370    # region Filtering results
371    ###################################################################################
372    ###################################################################################
373
374    def parse_ignore_comments(self, file: str) -> None:
375        # The first time we check a file, find all start/end ignore comments and memoize them.
376        line_number = 1
377        while True:
378
379            line_text = self.crytic_compile.get_code_from_line(file, line_number)
380            if line_text is None:
381                break
382
383            start_regex = r"^\s*//\s*slither-disable-start\s*([a-zA-Z0-9_,-]*)"
384            end_regex = r"^\s*//\s*slither-disable-end\s*([a-zA-Z0-9_,-]*)"
385            start_match = re.findall(start_regex, line_text.decode("utf8"))
386            end_match = re.findall(end_regex, line_text.decode("utf8"))
387
388            if start_match:
389                ignored = start_match[0].split(",")
390                if ignored:
391                    for check in ignored:
392                        vals = self._ignore_ranges[file][check]
393                        if len(vals) == 0 or vals[-1][1] != float("inf"):
394                            # First item in the array, or the prior item is fully populated.
395                            self._ignore_ranges[file][check].append((line_number, float("inf")))
396                        else:
397                            logger.error(
398                                f"Consecutive slither-disable-starts without slither-disable-end in {file}#{line_number}"
399                            )
400                            return
401
402            if end_match:
403                ignored = end_match[0].split(",")
404                if ignored:
405                    for check in ignored:
406                        vals = self._ignore_ranges[file][check]
407                        if len(vals) == 0 or vals[-1][1] != float("inf"):
408                            logger.error(
409                                f"slither-disable-end without slither-disable-start in {file}#{line_number}"
410                            )
411                            return
412                        self._ignore_ranges[file][check][-1] = (vals[-1][0], line_number)
413
414            line_number += 1
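
The comment forms recognized by the regexes above, together with the next-line form handled in has_ignore_comment below, look like this in Solidity source (the detector names are examples):

    // slither-disable-start reentrancy-eth,timestamp
    ... code ignored for the listed detectors (or "all") ...
    // slither-disable-end reentrancy-eth,timestamp

    // slither-disable-next-line naming-convention
    uint private my_var;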
415
416    def has_ignore_comment(self, r: Dict) -> bool:
417        """
418        Check if the result has an ignore comment in the file or on the preceding
419        line, in which case it is not valid
420        """
421        if not self.crytic_compile:
422            return False
423        mapping_elements_with_lines = (
424            (
425                posixpath.normpath(elem["source_mapping"]["filename_absolute"]),
426                elem["source_mapping"]["lines"],
427            )
428            for elem in r["elements"]
429            if "source_mapping" in elem
430            and "filename_absolute" in elem["source_mapping"]
431            and "lines" in elem["source_mapping"]
432            and len(elem["source_mapping"]["lines"]) > 0
433        )
434
435        for file, lines in mapping_elements_with_lines:
436
437            # Check if result is within an ignored range.
438            ignore_ranges = self._ignore_ranges[file][r["check"]] + self._ignore_ranges[file]["all"]
439            for start, end in ignore_ranges:
440                # The full check must be within the ignore range to be ignored.
441                if start < lines[0] and end > lines[-1]:
442                    return True
443
444            # Check for next-line matchers.
445            ignore_line_index = min(lines) - 1
446            ignore_line_text = self.crytic_compile.get_code_from_line(file, ignore_line_index)
447            if ignore_line_text:
448                match = re.findall(
449                    r"^\s*//\s*slither-disable-next-line\s*([a-zA-Z0-9_,-]*)",
450                    ignore_line_text.decode("utf8"),
451                )
452                if match:
453                    ignored = match[0].split(",")
454                    if ignored and ("all" in ignored or any(r["check"] == c for c in ignored)):
455                        return True
456
457        return False
458
459    def valid_result(self, r: Dict) -> bool:
460        """
461        Check if the result is valid
462        A result is invalid if:
463            - All its source paths match the filtered paths (or none match the included paths)
464            - A similar result was reported and saved during a previous run
465            - The --exclude-dependencies flag is set and the result relates only to dependencies
466            - There is an ignore comment on the preceding line or in the file
467        """
468
469        # Remove duplicates due to multiple-compilation support
470        if r["id"] in self._currently_seen_results:
471            return False
472        self._currently_seen_results.add(r["id"])
473
474        source_mapping_elements = [
475            elem["source_mapping"].get("filename_absolute", "unknown")
476            for elem in r["elements"]
477            if "source_mapping" in elem
478        ]
479
480        # Use POSIX-style paths so that filter_paths/include_paths work across
481        # OSes. Convert to a list so elements aren't consumed and lost while
482        # evaluating the first pattern
483        source_mapping_elements = list(
484            map(lambda x: pathlib.Path(x).resolve().as_posix() if x else x, source_mapping_elements)
485        )
486        (matching, paths, msg_err) = (
487            (True, self._paths_to_include, "--include-paths")
488            if self._paths_to_include
489            else (False, self._paths_to_filter, "--filter-paths")
490        )
491
492        for path in paths:
493            try:
494                if any(
495                    bool(re.search(_relative_path_format(path), src_mapping))
496                    for src_mapping in source_mapping_elements
497                ):
498                    matching = not matching
499                    break
500            except re.error:
501                logger.error(
502                    f"Incorrect regular expression for {msg_err} {path}."
503                    "\nSlither supports the Python re format"
504                    ": https://docs.python.org/3/library/re.html"
505                )
506
507        if r["elements"] and matching:
508            return False
509
510        if self._show_ignored_findings:
511            return True
512        if self.has_ignore_comment(r):
513            return False
514        if r["id"] in self._previous_results_ids:
515            return False
516        if r["elements"] and self._exclude_dependencies:
517            if all(element["source_mapping"]["is_dependency"] for element in r["elements"]):
518                return False
519        # Preserve the previous description-based filtering. Kept for compatibility, but meant to be removed
520        if r["description"] in [pr["description"] for pr in self._previous_results]:
521            return False
522
523        return True
524
525    def load_previous_results(self) -> None:
526        self.load_previous_results_from_sarif()
527
528        filename = self._previous_results_filename
529        try:
530            if os.path.isfile(filename):
531                with open(filename, encoding="utf8") as f:
532                    self._previous_results = json.load(f)
533                    if self._previous_results:
534                        for r in self._previous_results:
535                            if "id" in r:
536                                self._previous_results_ids.add(r["id"])
537
538        except json.decoder.JSONDecodeError:
539            logger.error(red(f"Impossible to decode {filename}. Consider removing the file"))
540
541    def load_previous_results_from_sarif(self) -> None:
542        sarif = pathlib.Path(self.sarif_input)
543        triage = pathlib.Path(self.sarif_triage)
544
545        if not sarif.exists():
546            return
547        if not triage.exists():
548            return
549
550        triaged = read_triage_info(sarif, triage)
551
552        for id_triaged in triaged:
553            self._previous_results_ids.add(id_triaged)
554
555    def write_results_to_hide(self) -> None:
556        if not self._results_to_hide:
557            return
558        filename = self._previous_results_filename
559        with open(filename, "w", encoding="utf8") as f:
560            results = self._results_to_hide + self._previous_results
561            json.dump(results, f)
562
563    def save_results_to_hide(self, results: List[Dict]) -> None:
564        self._results_to_hide += results
565        self.write_results_to_hide()
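
Triage sketch: results hidden here are appended to slither.db.json (see _previous_results_filename) and filtered out on the next run by load_previous_results / valid_result; the --triage-mode CLI flag drives this flow. Calling it directly looks like (triaged_results is a hypothetical List[Dict] of result dictionaries):

    sl.save_results_to_hide(triaged_results)  # persists immediately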
566
567    def add_path_to_filter(self, path: str):
568        """
569        Add a path to filter out
570        Paths are matched as Python regular expressions against results' source paths (see valid_result)
571        """
572        self._paths_to_filter.add(path)
573
574    def add_path_to_include(self, path: str):
575        """
576        Add a path to include
577        Paths are matched as Python regular expressions against results' source paths (see valid_result)
578        """
579        self._paths_to_include.add(path)
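
Usage sketch, mirroring the --filter-paths/--include-paths CLI flags; sl is from the earlier sketch and the patterns are examples:

    sl.add_path_to_filter("node_modules/")  # drop results from dependencies
    sl.add_path_to_include("src/")          # or: keep only results under src/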
580
581    # endregion
582    ###################################################################################
583    ###################################################################################
584    # region Crytic compile
585    ###################################################################################
586    ###################################################################################
587
588    @property
589    def crytic_compile(self) -> CryticCompile:
590        return self._crytic_compile  # type: ignore
591
592    # endregion
593    ###################################################################################
594    ###################################################################################
595    # region Format
596    ###################################################################################
597    ###################################################################################
598
599    @property
600    def generate_patches(self) -> bool:
601        return self._generate_patches
602
603    @generate_patches.setter
604    def generate_patches(self, p: bool):
605        self._generate_patches = p
606
607    # endregion
608    ###################################################################################
609    ###################################################################################
610    # region Internals
611    ###################################################################################
612    ###################################################################################
613
614    @property
615    def disallow_partial(self) -> bool:
616        """
617        Return true if partial analyses are disallowed
618        For example, a codebase with duplicate contract names will lead to partial analyses
619
620        :return:
621        """
622        return self._disallow_partial
623
624    @property
625    def skip_assembly(self) -> bool:
626        return self._skip_assembly
627
628    @property
629    def show_ignore_findings(self) -> bool:
630        return self._show_ignored_findings
631
632    # endregion
logger = <Logger Slither (WARNING)>
class SlitherCore(slither.core.context.context.Context):
 40class SlitherCore(Context):
 41    """
 42    Slither static analyzer
 43    """
 44
 45    def __init__(self) -> None:
 46        super().__init__()
 47
 48        self._filename: Optional[str] = None
 49        self._raw_source_code: Dict[str, str] = {}
 50        self._source_code_to_line: Optional[Dict[str, List[str]]] = None
 51
 52        self._previous_results_filename: str = "slither.db.json"
 53
 54        # TODO: add cli flag to set these variables
 55        self.sarif_input: str = "export.sarif"
 56        self.sarif_triage: str = "export.sarif.sarifexplorer"
 57        self._results_to_hide: List = []
 58        self._previous_results: List = []
 59        # From triaged result
 60        self._previous_results_ids: Set[str] = set()
 61        # Every slither object has a list of result from detector
 62        # Because of the multiple compilation support, we might analyze
 63        # Multiple time the same result, so we remove duplicates
 64        self._currently_seen_resuts: Set[str] = set()
 65        self._paths_to_filter: Set[str] = set()
 66        self._paths_to_include: Set[str] = set()
 67
 68        self._crytic_compile: Optional[CryticCompile] = None
 69
 70        self._generate_patches = False
 71        self._exclude_dependencies = False
 72
 73        self._markdown_root = ""
 74
 75        # If set to true, slither will not catch errors during parsing
 76        self._disallow_partial: bool = False
 77        self._skip_assembly: bool = False
 78
 79        self._show_ignored_findings = False
 80
 81        # Maps from file to detector name to the start/end ranges for that detector.
 82        # Infinity is used to signal a detector has no end range.
 83        self._ignore_ranges: Dict[str, Dict[str, List[Tuple[int, ...]]]] = defaultdict(
 84            lambda: defaultdict(lambda: [(-1, -1)])
 85        )
 86
 87        self._compilation_units: List[SlitherCompilationUnit] = []
 88
 89        self._contracts: List[Contract] = []
 90        self._contracts_derived: List[Contract] = []
 91
 92        self._offset_to_min_offset: Optional[Dict[Filename, Dict[int, Set[int]]]] = None
 93        self._offset_to_objects: Optional[Dict[Filename, Dict[int, Set[SourceMapping]]]] = None
 94        self._offset_to_references: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None
 95        self._offset_to_implementations: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None
 96        self._offset_to_definitions: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None
 97
 98        # Line prefix is used during the source mapping generation
 99        # By default we generate file.sol#1
100        # But we allow to alter this (ex: file.sol:1) for vscode integration
101        self.line_prefix: str = "#"
102
103        # Use by the echidna printer
104        # If true, partial analysis is allowed
105        self.no_fail = False
106
107        self.skip_data_dependency = False
108
109    @property
110    def compilation_units(self) -> List[SlitherCompilationUnit]:
111        return list(self._compilation_units)
112
113    def add_compilation_unit(self, compilation_unit: SlitherCompilationUnit):
114        self._compilation_units.append(compilation_unit)
115
116    # endregion
117    ###################################################################################
118    ###################################################################################
119    # region Contracts
120    ###################################################################################
121    ###################################################################################
122
123    @property
124    def contracts(self) -> List[Contract]:
125        if not self._contracts:
126            all_contracts = [
127                compilation_unit.contracts for compilation_unit in self._compilation_units
128            ]
129            self._contracts = [item for sublist in all_contracts for item in sublist]
130        return self._contracts
131
132    @property
133    def contracts_derived(self) -> List[Contract]:
134        if not self._contracts_derived:
135            all_contracts = [
136                compilation_unit.contracts_derived for compilation_unit in self._compilation_units
137            ]
138            self._contracts_derived = [item for sublist in all_contracts for item in sublist]
139        return self._contracts_derived
140
141    def get_contract_from_name(self, contract_name: Union[str, Constant]) -> List[Contract]:
142        """
143            Return a contract from a name
144        Args:
145            contract_name (str): name of the contract
146        Returns:
147            Contract
148        """
149        contracts = []
150        for compilation_unit in self._compilation_units:
151            contracts += compilation_unit.get_contract_from_name(contract_name)
152        return contracts
153
154    ###################################################################################
155    ###################################################################################
156    # region Source code
157    ###################################################################################
158    ###################################################################################
159
160    @property
161    def source_code(self) -> Dict[str, str]:
162        """{filename: source_code (str)}: source code"""
163        return self._raw_source_code
164
165    @property
166    def filename(self) -> Optional[str]:
167        """str: Filename."""
168        return self._filename
169
170    @filename.setter
171    def filename(self, filename: str):
172        self._filename = filename
173
174    def add_source_code(self, path: str) -> None:
175        """
176        :param path:
177        :return:
178        """
179        if self.crytic_compile and path in self.crytic_compile.src_content:
180            self.source_code[path] = self.crytic_compile.src_content[path]
181        else:
182            with open(path, encoding="utf8", newline="") as f:
183                self.source_code[path] = f.read()
184
185        self.parse_ignore_comments(path)
186
187    @property
188    def markdown_root(self) -> str:
189        return self._markdown_root
190
191    def print_functions(self, d: str):
192        """
193        Export all the functions to dot files
194        """
195        for compilation_unit in self._compilation_units:
196            for c in compilation_unit.contracts:
197                for f in c.functions:
198                    f.cfg_to_dot(os.path.join(d, f"{c.name}.{f.name}.dot"))
199
200    def _compute_offsets_from_thing(self, thing: SourceMapping):
201        definition = get_definition(thing, self.crytic_compile)
202        references = get_references(thing)
203        implementations = get_all_implementations(thing, self.contracts)
204
205        # Create the offset mapping
206        for offset in range(definition.start, definition.end + 1):
207            self._offset_to_min_offset[definition.filename][offset].add(definition.start)
208
209        is_declared_function = (
210            isinstance(thing, FunctionContract) and thing.contract_declarer == thing.contract
211        )
212
213        should_add_to_objects = (
214            isinstance(thing, (TopLevel, Contract))
215            or is_declared_function
216            or (isinstance(thing, ContractLevel) and not isinstance(thing, FunctionContract))
217        )
218
219        if should_add_to_objects:
220            self._offset_to_objects[definition.filename][definition.start].add(thing)
221
222        self._offset_to_definitions[definition.filename][definition.start].add(definition)
223        self._offset_to_implementations[definition.filename][definition.start].update(
224            implementations
225        )
226        self._offset_to_references[definition.filename][definition.start] |= set(references)
227
228        # For references
229        should_add_to_objects = (
230            isinstance(thing, TopLevel)
231            or is_declared_function
232            or (isinstance(thing, ContractLevel) and not isinstance(thing, FunctionContract))
233        )
234
235        for ref in references:
236            for offset in range(ref.start, ref.end + 1):
237                self._offset_to_min_offset[definition.filename][offset].add(ref.start)
238
239            if should_add_to_objects:
240                self._offset_to_objects[definition.filename][ref.start].add(thing)
241
242            if is_declared_function:
243                # Only show the nearest lexical definition for declared contract-level functions
244                if (
245                    thing.contract.source_mapping.start
246                    < ref.start
247                    < thing.contract.source_mapping.end
248                ):
249
250                    self._offset_to_definitions[ref.filename][ref.start].add(definition)
251
252            else:
253                self._offset_to_definitions[ref.filename][ref.start].add(definition)
254
255            self._offset_to_implementations[ref.filename][ref.start].update(implementations)
256            self._offset_to_references[ref.filename][ref.start] |= set(references)
257
258    def _compute_offsets_to_ref_impl_decl(self):  # pylint: disable=too-many-branches
259        self._offset_to_references = defaultdict(lambda: defaultdict(lambda: set()))
260        self._offset_to_definitions = defaultdict(lambda: defaultdict(lambda: set()))
261        self._offset_to_implementations = defaultdict(lambda: defaultdict(lambda: set()))
262        self._offset_to_objects = defaultdict(lambda: defaultdict(lambda: set()))
263        self._offset_to_min_offset = defaultdict(lambda: defaultdict(lambda: set()))
264
265        for compilation_unit in self._compilation_units:
266            for contract in compilation_unit.contracts:
267                self._compute_offsets_from_thing(contract)
268
269                for function in contract.functions_declared:
270                    self._compute_offsets_from_thing(function)
271                    for variable in function.local_variables:
272                        self._compute_offsets_from_thing(variable)
273                for modifier in contract.modifiers_declared:
274                    self._compute_offsets_from_thing(modifier)
275                    for variable in modifier.local_variables:
276                        self._compute_offsets_from_thing(variable)
277
278                for var in contract.state_variables:
279                    self._compute_offsets_from_thing(var)
280
281                for st in contract.structures:
282                    self._compute_offsets_from_thing(st)
283
284                for enum in contract.enums:
285                    self._compute_offsets_from_thing(enum)
286
287                for event in contract.events:
288                    self._compute_offsets_from_thing(event)
289
290                for typ in contract.type_aliases:
291                    self._compute_offsets_from_thing(typ)
292
293            for enum in compilation_unit.enums_top_level:
294                self._compute_offsets_from_thing(enum)
295            for event in compilation_unit.events_top_level:
296                self._compute_offsets_from_thing(event)
297            for function in compilation_unit.functions_top_level:
298                self._compute_offsets_from_thing(function)
299            for st in compilation_unit.structures_top_level:
300                self._compute_offsets_from_thing(st)
301            for var in compilation_unit.variables_top_level:
302                self._compute_offsets_from_thing(var)
303            for typ in compilation_unit.type_aliases.values():
304                self._compute_offsets_from_thing(typ)
305            for err in compilation_unit.custom_errors:
306                self._compute_offsets_from_thing(err)
307            for event in compilation_unit.events_top_level:
308                self._compute_offsets_from_thing(event)
309            for import_directive in compilation_unit.import_directives:
310                self._compute_offsets_from_thing(import_directive)
311            for pragma in compilation_unit.pragma_directives:
312                self._compute_offsets_from_thing(pragma)
313
314    T = TypeVar("T", Source, SourceMapping)
315
316    def _get_offset(
317        self, mapping: Dict[Filename, Dict[int, Set[T]]], filename_str: str, offset: int
318    ) -> Set[T]:
319        """Get the Source/SourceMapping referenced by the offset.
320
321        For performance reasons, references are only stored once at the lowest offset.
322        It uses the _offset_to_min_offset mapping to retrieve the correct offsets.
323        As multiple definitions can be related to the same offset, we retrieve all of them.
324
325        :param mapping: Mapping to search for (objects. references, ...)
326        :param filename_str: Filename to consider
327        :param offset: Look-up offset
328        :raises IndexError: When the start offset is not found
329        :return: The corresponding set of Source/SourceMapping
330        """
331        filename: Filename = self.crytic_compile.filename_lookup(filename_str)
332
333        start_offsets = self._offset_to_min_offset[filename][offset]
334        if not start_offsets:
335            msg = f"Unable to find reference for offset {offset}"
336            raise IndexError(msg)
337
338        results = set()
339        for start_offset in start_offsets:
340            results |= mapping[filename][start_offset]
341
342        return results
343
344    def offset_to_references(self, filename_str: str, offset: int) -> Set[Source]:
345        if self._offset_to_references is None:
346            self._compute_offsets_to_ref_impl_decl()
347
348        return self._get_offset(self._offset_to_references, filename_str, offset)
349
350    def offset_to_implementations(self, filename_str: str, offset: int) -> Set[Source]:
351        if self._offset_to_implementations is None:
352            self._compute_offsets_to_ref_impl_decl()
353
354        return self._get_offset(self._offset_to_implementations, filename_str, offset)
355
356    def offset_to_definitions(self, filename_str: str, offset: int) -> Set[Source]:
357        if self._offset_to_definitions is None:
358            self._compute_offsets_to_ref_impl_decl()
359
360        return self._get_offset(self._offset_to_definitions, filename_str, offset)
361
362    def offset_to_objects(self, filename_str: str, offset: int) -> Set[SourceMapping]:
363        if self._offset_to_objects is None:
364            self._compute_offsets_to_ref_impl_decl()
365
366        return self._get_offset(self._offset_to_objects, filename_str, offset)
367
368    # endregion
369    ###################################################################################
370    ###################################################################################
371    # region Filtering results
372    ###################################################################################
373    ###################################################################################
374
375    def parse_ignore_comments(self, file: str) -> None:
376        # The first time we check a file, find all start/end ignore comments and memoize them.
377        line_number = 1
378        while True:
379
380            line_text = self.crytic_compile.get_code_from_line(file, line_number)
381            if line_text is None:
382                break
383
384            start_regex = r"^\s*//\s*slither-disable-start\s*([a-zA-Z0-9_,-]*)"
385            end_regex = r"^\s*//\s*slither-disable-end\s*([a-zA-Z0-9_,-]*)"
386            start_match = re.findall(start_regex, line_text.decode("utf8"))
387            end_match = re.findall(end_regex, line_text.decode("utf8"))
388
389            if start_match:
390                ignored = start_match[0].split(",")
391                if ignored:
392                    for check in ignored:
393                        vals = self._ignore_ranges[file][check]
394                        if len(vals) == 0 or vals[-1][1] != float("inf"):
395                            # First item in the array, or the prior item is fully populated.
396                            self._ignore_ranges[file][check].append((line_number, float("inf")))
397                        else:
398                            logger.error(
399                                f"Consecutive slither-disable-starts without slither-disable-end in {file}#{line_number}"
400                            )
401                            return
402
403            if end_match:
404                ignored = end_match[0].split(",")
405                if ignored:
406                    for check in ignored:
407                        vals = self._ignore_ranges[file][check]
408                        if len(vals) == 0 or vals[-1][1] != float("inf"):
409                            logger.error(
410                                f"slither-disable-end without slither-disable-start in {file}#{line_number}"
411                            )
412                            return
413                        self._ignore_ranges[file][check][-1] = (vals[-1][0], line_number)
414
415            line_number += 1
416
417    def has_ignore_comment(self, r: Dict) -> bool:
418        """
419        Check if the result has an ignore comment in the file or on the preceding line, in which
420        case, it is not valid
421        """
422        if not self.crytic_compile:
423            return False
424        mapping_elements_with_lines = (
425            (
426                posixpath.normpath(elem["source_mapping"]["filename_absolute"]),
427                elem["source_mapping"]["lines"],
428            )
429            for elem in r["elements"]
430            if "source_mapping" in elem
431            and "filename_absolute" in elem["source_mapping"]
432            and "lines" in elem["source_mapping"]
433            and len(elem["source_mapping"]["lines"]) > 0
434        )
435
436        for file, lines in mapping_elements_with_lines:
437
438            # Check if result is within an ignored range.
439            ignore_ranges = self._ignore_ranges[file][r["check"]] + self._ignore_ranges[file]["all"]
440            for start, end in ignore_ranges:
441                # The full check must be within the ignore range to be ignored.
442                if start < lines[0] and end > lines[-1]:
443                    return True
444
445            # Check for next-line matchers.
446            ignore_line_index = min(lines) - 1
447            ignore_line_text = self.crytic_compile.get_code_from_line(file, ignore_line_index)
448            if ignore_line_text:
449                match = re.findall(
450                    r"^\s*//\s*slither-disable-next-line\s*([a-zA-Z0-9_,-]*)",
451                    ignore_line_text.decode("utf8"),
452                )
453                if match:
454                    ignored = match[0].split(",")
455                    if ignored and ("all" in ignored or any(r["check"] == c for c in ignored)):
456                        return True
457
458        return False
459
460    def valid_result(self, r: Dict) -> bool:
461        """
462        Check if the result is valid
463        A result is invalid if:
464            - All its source paths belong to the source path filtered
465            - Or a similar result was reported and saved during a previous run
466            - The --exclude-dependencies flag is set and results are only related to dependencies
467            - There is an ignore comment on the preceding line or in the file
468        """
469
470        # Remove duplicate due to the multiple compilation support
471        if r["id"] in self._currently_seen_resuts:
472            return False
473        self._currently_seen_resuts.add(r["id"])
474
475        source_mapping_elements = [
476            elem["source_mapping"].get("filename_absolute", "unknown")
477            for elem in r["elements"]
478            if "source_mapping" in elem
479        ]
480
481        # Use POSIX-style paths so that filter_paths|include_paths works across different
482        # OSes. Convert to a list so elements don't get consumed and are lost
483        # while evaluating the first pattern
484        source_mapping_elements = list(
485            map(lambda x: pathlib.Path(x).resolve().as_posix() if x else x, source_mapping_elements)
486        )
487        (matching, paths, msg_err) = (
488            (True, self._paths_to_include, "--include-paths")
489            if self._paths_to_include
490            else (False, self._paths_to_filter, "--filter-paths")
491        )
492
493        for path in paths:
494            try:
495                if any(
496                    bool(re.search(_relative_path_format(path), src_mapping))
497                    for src_mapping in source_mapping_elements
498                ):
499                    matching = not matching
500                    break
501            except re.error:
502                logger.error(
503                    f"Incorrect regular expression for {msg_err} {path}."
504                    "\nSlither supports the Python re format"
505                    ": https://docs.python.org/3/library/re.html"
506                )
507
508        if r["elements"] and matching:
509            return False
510
511        if self._show_ignored_findings:
512            return True
513        if self.has_ignore_comment(r):
514            return False
515        if r["id"] in self._previous_results_ids:
516            return False
517        if r["elements"] and self._exclude_dependencies:
518            if all(element["source_mapping"]["is_dependency"] for element in r["elements"]):
519                return False
520        # Conserve previous result filtering. This is conserved for compatibility, but is meant to be removed
521        if r["description"] in [pr["description"] for pr in self._previous_results]:
522            return False
523
524        return True
525
526    def load_previous_results(self) -> None:
527        self.load_previous_results_from_sarif()
528
529        filename = self._previous_results_filename
530        try:
531            if os.path.isfile(filename):
532                with open(filename, encoding="utf8") as f:
533                    self._previous_results = json.load(f)
534                    if self._previous_results:
535                        for r in self._previous_results:
536                            if "id" in r:
537                                self._previous_results_ids.add(r["id"])
538
539        except json.decoder.JSONDecodeError:
540            logger.error(red(f"Impossible to decode {filename}. Consider removing the file"))
541
542    def load_previous_results_from_sarif(self) -> None:
543        sarif = pathlib.Path(self.sarif_input)
544        triage = pathlib.Path(self.sarif_triage)
545
546        if not sarif.exists():
547            return
548        if not triage.exists():
549            return
550
551        triaged = read_triage_info(sarif, triage)
552
553        for id_triaged in triaged:
554            self._previous_results_ids.add(id_triaged)
555
556    def write_results_to_hide(self) -> None:
557        if not self._results_to_hide:
558            return
559        filename = self._previous_results_filename
560        with open(filename, "w", encoding="utf8") as f:
561            results = self._results_to_hide + self._previous_results
562            json.dump(results, f)
563
564    def save_results_to_hide(self, results: List[Dict]) -> None:
565        self._results_to_hide += results
566        self.write_results_to_hide()
567
568    def add_path_to_filter(self, path: str):
569        """
570        Add path to filter
571        Path are used through direct comparison (no regex)
572        """
573        self._paths_to_filter.add(path)
574
575    def add_path_to_include(self, path: str):
576        """
577        Add path to include
578        Path are used through direct comparison (no regex)
579        """
580        self._paths_to_include.add(path)
581
582    # endregion
583    ###################################################################################
584    ###################################################################################
585    # region Crytic compile
586    ###################################################################################
587    ###################################################################################
588
589    @property
590    def crytic_compile(self) -> CryticCompile:
591        return self._crytic_compile  # type: ignore
592
593    # endregion
594    ###################################################################################
595    ###################################################################################
596    # region Format
597    ###################################################################################
598    ###################################################################################
599
600    @property
601    def generate_patches(self) -> bool:
602        return self._generate_patches
603
604    @generate_patches.setter
605    def generate_patches(self, p: bool):
606        self._generate_patches = p
607
608    # endregion
609    ###################################################################################
610    ###################################################################################
611    # region Internals
612    ###################################################################################
613    ###################################################################################
614
615    @property
616    def disallow_partial(self) -> bool:
617        """
618        Return true if partial analyses are disallowed
619        For example, codebase with duplicate names will lead to partial analyses
620
621        :return:
622        """
623        return self._disallow_partial
624
625    @property
626    def skip_assembly(self) -> bool:
627        return self._skip_assembly
628
629    @property
630    def show_ignore_findings(self) -> bool:
631        return self._show_ignored_findings
632
633    # endregion

Slither static analyzer

sarif_input: str
sarif_triage: str
line_prefix: str
no_fail
skip_data_dependency
compilation_units: List[slither.core.compilation_unit.SlitherCompilationUnit]
109    @property
110    def compilation_units(self) -> List[SlitherCompilationUnit]:
111        return list(self._compilation_units)
def add_compilation_unit( self, compilation_unit: slither.core.compilation_unit.SlitherCompilationUnit):
113    def add_compilation_unit(self, compilation_unit: SlitherCompilationUnit):
114        self._compilation_units.append(compilation_unit)
contracts: List[slither.core.declarations.contract.Contract]
123    @property
124    def contracts(self) -> List[Contract]:
125        if not self._contracts:
126            all_contracts = [
127                compilation_unit.contracts for compilation_unit in self._compilation_units
128            ]
129            self._contracts = [item for sublist in all_contracts for item in sublist]
130        return self._contracts
contracts_derived: List[slither.core.declarations.contract.Contract]
132    @property
133    def contracts_derived(self) -> List[Contract]:
134        if not self._contracts_derived:
135            all_contracts = [
136                compilation_unit.contracts_derived for compilation_unit in self._compilation_units
137            ]
138            self._contracts_derived = [item for sublist in all_contracts for item in sublist]
139        return self._contracts_derived
def get_contract_from_name( self, contract_name: Union[str, slither.slithir.variables.constant.Constant]) -> List[slither.core.declarations.contract.Contract]:
141    def get_contract_from_name(self, contract_name: Union[str, Constant]) -> List[Contract]:
142        """
143            Return a contract from a name
144        Args:
145            contract_name (str): name of the contract
146        Returns:
147            Contract
148        """
149        contracts = []
150        for compilation_unit in self._compilation_units:
151            contracts += compilation_unit.get_contract_from_name(contract_name)
152        return contracts

Return a contract from a name Args: contract_name (str): name of the contract Returns: Contract

source_code: Dict[str, str]
160    @property
161    def source_code(self) -> Dict[str, str]:
162        """{filename: source_code (str)}: source code"""
163        return self._raw_source_code

{filename: source_code (str)}: source code

filename: Union[str, NoneType]
165    @property
166    def filename(self) -> Optional[str]:
167        """str: Filename."""
168        return self._filename

str: Filename.

def add_source_code(self, path: str) -> None:
174    def add_source_code(self, path: str) -> None:
175        """
176        Register the source code for path (from crytic-compile's cache when
177        available, otherwise from disk), then parse its ignore comments.
178        """
179        if self.crytic_compile and path in self.crytic_compile.src_content:
180            self.source_code[path] = self.crytic_compile.src_content[path]
181        else:
182            with open(path, encoding="utf8", newline="") as f:
183                self.source_code[path] = f.read()
184
185        self.parse_ignore_comments(path)

Register the source code for path (from crytic-compile's cache when available, otherwise from disk), then parse its ignore comments.

markdown_root: str
187    @property
188    def markdown_root(self) -> str:
189        return self._markdown_root
def print_functions(self, d: str):
191    def print_functions(self, d: str):
192        """
193        Export all the functions to dot files
194        """
195        for compilation_unit in self._compilation_units:
196            for c in compilation_unit.contracts:
197                for f in c.functions:
198                    f.cfg_to_dot(os.path.join(d, f"{c.name}.{f.name}.dot"))

Export all the functions to dot files
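
For example, a hedged sketch reusing the slither instance above (the output directory is hypothetical and must exist):

    import os

    os.makedirs("cfgs", exist_ok=True)
    slither.print_functions("cfgs")  # writes one <Contract>.<function>.dot per function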

def offset_to_references(self, filename_str: str, offset: int) -> Set[slither.core.source_mapping.source_mapping.Source]:
344    def offset_to_references(self, filename_str: str, offset: int) -> Set[Source]:
345        if self._offset_to_references is None:
346            self._compute_offsets_to_ref_impl_decl()
347
348        return self._get_offset(self._offset_to_references, filename_str, offset)
def offset_to_implementations(self, filename_str: str, offset: int) -> Set[slither.core.source_mapping.source_mapping.Source]:
350    def offset_to_implementations(self, filename_str: str, offset: int) -> Set[Source]:
351        if self._offset_to_implementations is None:
352            self._compute_offsets_to_ref_impl_decl()
353
354        return self._get_offset(self._offset_to_implementations, filename_str, offset)
def offset_to_definitions(self, filename_str: str, offset: int) -> Set[slither.core.source_mapping.source_mapping.Source]:
356    def offset_to_definitions(self, filename_str: str, offset: int) -> Set[Source]:
357        if self._offset_to_definitions is None:
358            self._compute_offsets_to_ref_impl_decl()
359
360        return self._get_offset(self._offset_to_definitions, filename_str, offset)
def offset_to_objects(self, filename_str: str, offset: int) -> Set[slither.core.source_mapping.source_mapping.SourceMapping]:
362    def offset_to_objects(self, filename_str: str, offset: int) -> Set[SourceMapping]:
363        if self._offset_to_objects is None:
364            self._compute_offsets_to_ref_impl_decl()
365
366        return self._get_offset(self._offset_to_objects, filename_str, offset)
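
All four lookups share one shape: lazily build the offset tables, then return the objects covering a byte offset in a file. A hedged sketch (the path and offset are hypothetical):

    # Definitions covering byte offset 120 of token.sol
    for source in slither.offset_to_definitions("token.sol", 120):
        print(source.start, source.length)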
def parse_ignore_comments(self, file: str) -> None:
375    def parse_ignore_comments(self, file: str) -> None:
376        # The first time we check a file, find all start/end ignore comments and memoize them.
377        line_number = 1
378        while True:
379
380            line_text = self.crytic_compile.get_code_from_line(file, line_number)
381            if line_text is None:
382                break
383
384            start_regex = r"^\s*//\s*slither-disable-start\s*([a-zA-Z0-9_,-]*)"
385            end_regex = r"^\s*//\s*slither-disable-end\s*([a-zA-Z0-9_,-]*)"
386            start_match = re.findall(start_regex, line_text.decode("utf8"))
387            end_match = re.findall(end_regex, line_text.decode("utf8"))
388
389            if start_match:
390                ignored = start_match[0].split(",")
391                if ignored:
392                    for check in ignored:
393                        vals = self._ignore_ranges[file][check]
394                        if len(vals) == 0 or vals[-1][1] != float("inf"):
395                            # First item in the array, or the prior item is fully populated.
396                            self._ignore_ranges[file][check].append((line_number, float("inf")))
397                        else:
398                            logger.error(
399                                f"Consecutive slither-disable-starts without slither-disable-end in {file}#{line_number}"
400                            )
401                            return
402
403            if end_match:
404                ignored = end_match[0].split(",")
405                if ignored:
406                    for check in ignored:
407                        vals = self._ignore_ranges[file][check]
408                        if len(vals) == 0 or vals[-1][1] != float("inf"):
409                            logger.error(
410                                f"slither-disable-end without slither-disable-start in {file}#{line_number}"
411                            )
412                            return
413                        self._ignore_ranges[file][check][-1] = (vals[-1][0], line_number)
414
415            line_number += 1
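
The marker syntax can be exercised in isolation; a runnable sketch of the start regex above (the comment line is hypothetical):

    import re

    start_regex = r"^\s*//\s*slither-disable-start\s*([a-zA-Z0-9_,-]*)"
    line = "  // slither-disable-start timestamp,naming-convention"
    print(re.findall(start_regex, line)[0].split(","))
    # ['timestamp', 'naming-convention']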
def has_ignore_comment(self, r: Dict) -> bool:
417    def has_ignore_comment(self, r: Dict) -> bool:
418        """
419        Check if the result has an ignore comment in the file or on the preceding line, in which
420        case it is not valid
421        """
422        if not self.crytic_compile:
423            return False
424        mapping_elements_with_lines = (
425            (
426                posixpath.normpath(elem["source_mapping"]["filename_absolute"]),
427                elem["source_mapping"]["lines"],
428            )
429            for elem in r["elements"]
430            if "source_mapping" in elem
431            and "filename_absolute" in elem["source_mapping"]
432            and "lines" in elem["source_mapping"]
433            and len(elem["source_mapping"]["lines"]) > 0
434        )
435
436        for file, lines in mapping_elements_with_lines:
437
438            # Check if result is within an ignored range.
439            ignore_ranges = self._ignore_ranges[file][r["check"]] + self._ignore_ranges[file]["all"]
440            for start, end in ignore_ranges:
441                # The full check must be within the ignore range to be ignored.
442                if start < lines[0] and end > lines[-1]:
443                    return True
444
445            # Check for next-line matchers.
446            ignore_line_index = min(lines) - 1
447            ignore_line_text = self.crytic_compile.get_code_from_line(file, ignore_line_index)
448            if ignore_line_text:
449                match = re.findall(
450                    r"^\s*//\s*slither-disable-next-line\s*([a-zA-Z0-9_,-]*)",
451                    ignore_line_text.decode("utf8"),
452                )
453                if match:
454                    ignored = match[0].split(",")
455                    if ignored and ("all" in ignored or any(r["check"] == c for c in ignored)):
456                        return True
457
458        return False

Check if the result has an ignore comment in the file or on the preceding line, in which case it is not valid
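
Note the strict inequalities in the range check: a finding that starts on the marker line itself is not suppressed. A hedged restatement as a standalone helper (hypothetical name, same logic):

    def within_ignore_range(lines, ranges):
        # the whole finding must sit strictly inside a (start, end) range
        return any(start < lines[0] and end > lines[-1] for start, end in ranges)

    print(within_ignore_range([5, 6], [(-1, -1), (3, 9)]))  # True
    print(within_ignore_range([3, 4], [(-1, -1), (3, 9)]))  # False: starts on the marker line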

def valid_result(self, r: Dict) -> bool:
460    def valid_result(self, r: Dict) -> bool:
461        """
462        Check if the result is valid
463        A result is invalid if:
464            - All of its source paths match a filtered path (--filter-paths)
465            - A similar result was reported and saved during a previous run
466            - The --exclude-dependencies flag is set and the result only involves dependencies
467            - An ignore comment covers it, in the file or on the preceding line
468        """
469
470        # Remove duplicates caused by multiple-compilation support
471        if r["id"] in self._currently_seen_resuts:
472            return False
473        self._currently_seen_resuts.add(r["id"])
474
475        source_mapping_elements = [
476            elem["source_mapping"].get("filename_absolute", "unknown")
477            for elem in r["elements"]
478            if "source_mapping" in elem
479        ]
480
481        # Use POSIX-style paths so that filter_paths|include_paths works across different
482        # OSes. Convert to a list so elements don't get consumed and are lost
483        # while evaluating the first pattern
484        source_mapping_elements = list(
485            map(lambda x: pathlib.Path(x).resolve().as_posix() if x else x, source_mapping_elements)
486        )
487        (matching, paths, msg_err) = (
488            (True, self._paths_to_include, "--include-paths")
489            if self._paths_to_include
490            else (False, self._paths_to_filter, "--filter-paths")
491        )
492
493        for path in paths:
494            try:
495                if any(
496                    bool(re.search(_relative_path_format(path), src_mapping))
497                    for src_mapping in source_mapping_elements
498                ):
499                    matching = not matching
500                    break
501            except re.error:
502                logger.error(
503                    f"Incorrect regular expression for {msg_err} {path}."
504                    "\nSlither supports the Python re format"
505                    ": https://docs.python.org/3/library/re.html"
506                )
507
508        if r["elements"] and matching:
509            return False
510
511        if self._show_ignored_findings:
512            return True
513        if self.has_ignore_comment(r):
514            return False
515        if r["id"] in self._previous_results_ids:
516            return False
517        if r["elements"] and self._exclude_dependencies:
518            if all(element["source_mapping"]["is_dependency"] for element in r["elements"]):
519                return False
520        # Preserve description-based filtering of previous results; kept for compatibility, but meant to be removed
521        if r["description"] in [pr["description"] for pr in self._previous_results]:
522            return False
523
524        return True

Check if the result is valid. A result is invalid if: all of its source paths match a filtered path (--filter-paths), a similar result was reported and saved during a previous run, the --exclude-dependencies flag is set and the result only involves dependencies, or an ignore comment covers it in the file or on the preceding line.
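
Filter patterns are matched with re.search after _relative_path_format (defined at the top of this module) strips leading "." and ".." segments; a runnable sketch with hypothetical paths:

    import re

    def _relative_path_format(path):  # copied from this module
        return path.split("..")[-1].strip(".").strip("/")

    pattern = _relative_path_format("../node_modules/")
    print(pattern)  # node_modules
    print(bool(re.search(pattern, "/repo/node_modules/erc20/ERC20.sol")))  # True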

def load_previous_results(self) -> None:
526    def load_previous_results(self) -> None:
527        self.load_previous_results_from_sarif()
528
529        filename = self._previous_results_filename
530        try:
531            if os.path.isfile(filename):
532                with open(filename, encoding="utf8") as f:
533                    self._previous_results = json.load(f)
534                    if self._previous_results:
535                        for r in self._previous_results:
536                            if "id" in r:
537                                self._previous_results_ids.add(r["id"])
538
539        except json.decoder.JSONDecodeError:
540            logger.error(red(f"Impossible to decode {filename}. Consider removing the file"))
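
The file layout is only what the loop above consumes: a JSON list of result dicts whose "id" keys feed _previous_results_ids. A hedged sketch of a minimal entry (real entries carry the full detector output: check, description, elements, ...):

    import json

    entry = [{"id": "abc123", "description": "hypothetical triaged finding"}]
    print(json.dumps(entry))  # shape of a minimal slither.db.json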
def load_previous_results_from_sarif(self) -> None:
542    def load_previous_results_from_sarif(self) -> None:
543        sarif = pathlib.Path(self.sarif_input)
544        triage = pathlib.Path(self.sarif_triage)
545
546        if not sarif.exists():
547            return
548        if not triage.exists():
549            return
550
551        triaged = read_triage_info(sarif, triage)
552
553        for id_triaged in triaged:
554            self._previous_results_ids.add(id_triaged)
def write_results_to_hide(self) -> None:
556    def write_results_to_hide(self) -> None:
557        if not self._results_to_hide:
558            return
559        filename = self._previous_results_filename
560        with open(filename, "w", encoding="utf8") as f:
561            results = self._results_to_hide + self._previous_results
562            json.dump(results, f)
def save_results_to_hide(self, results: List[Dict]) -> None:
564    def save_results_to_hide(self, results: List[Dict]) -> None:
565        self._results_to_hide += results
566        self.write_results_to_hide()
def add_path_to_filter(self, path: str):
568    def add_path_to_filter(self, path: str):
569        """
570        Add a path to filter out of the results
571        Paths are applied as Python regular expressions (see valid_result)
572        """
573        self._paths_to_filter.add(path)

Add a path to filter out of the results. Paths are applied as Python regular expressions (see valid_result).

def add_path_to_include(self, path: str):
575    def add_path_to_include(self, path: str):
576        """
577        Add a path to include in the results
578        Paths are applied as Python regular expressions (see valid_result)
579        """
580        self._paths_to_include.add(path)

Add a path to include in the results. Paths are applied as Python regular expressions (see valid_result).
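
A short hedged sketch (paths hypothetical). Because the entries are regular expressions, alternation and optional characters work; note valid_result gives --include-paths precedence whenever any include path is set:

    slither.add_path_to_filter("node_modules/")
    slither.add_path_to_filter(r"mocks?/")  # matches mock/ and mocks/
    slither.add_path_to_include("src/")     # switches matching to include mode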

crytic_compile: crytic_compile.crytic_compile.CryticCompile
589    @property
590    def crytic_compile(self) -> CryticCompile:
591        return self._crytic_compile  # type: ignore
generate_patches: bool
600    @property
601    def generate_patches(self) -> bool:
602        return self._generate_patches
disallow_partial: bool
615    @property
616    def disallow_partial(self) -> bool:
617        """
618        Return true if partial analyses are disallowed
619        For example, a codebase with duplicate names will lead to a partial analysis
620
621        :return:
622        """
623        return self._disallow_partial

Return true if partial analyses are disallowed. For example, a codebase with duplicate names will lead to a partial analysis.

skip_assembly: bool
625    @property
626    def skip_assembly(self) -> bool:
627        return self._skip_assembly
show_ignore_findings: bool
629    @property
630    def show_ignore_findings(self) -> bool:
631        return self._show_ignored_findings