Coverage for functions \ flipdare \ service \ safety \ safety_types.py: 100%

0 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-05-08 12:22 +1000

1#!/usr/bin/env python 

2# Copyright (c) 2026 Flipdare Pty Ltd. All rights reserved. 

3# 

4# This file is part of Flipdare's proprietary software and contains 

5# confidential and copyrighted material. Unauthorised copying, 

6# modification, distribution, or use of this file is strictly 

7# prohibited without prior written permission from Flipdare Pty Ltd. 

8# 

9# This software includes third-party components licensed under MIT, 

10# BSD, and Apache 2.0 licences. See THIRD_PARTY_NOTICES for details. 

11# 

12 

13# pragma: no cover 

from __future__ import annotations

from dataclasses import dataclass
from enum import StrEnum
from functools import cache

from flipdare.generated import RestrictionAction
from flipdare.generated.shared.model.core.stopwatch_duration import StopwatchDuration
from flipdare.generated.shared.model.restriction.moderation_decision import ModerationDecision

21 

# Public API of this module; keep in sync with the definitions below.
__all__ = [
    "RestrictionResult",
    "ModerationType",
    "ModerationCategory",
    "ReputationOutcome",
    "ModerationAssessment",
    "ModerationOutcome",
]

30 

31 

@dataclass(frozen=True)
class RestrictionResult:
    """Immutable pairing of a restriction action with its duration."""

    # The restriction to apply (generated type from flipdare.generated).
    action: RestrictionAction
    # How long the restriction lasts; units/semantics are defined by the
    # generated StopwatchDuration model — confirm there.
    duration: StopwatchDuration

36 

37 

@dataclass(frozen=True)
class ReputationOutcome:
    """Immutable result of a reputation update."""

    # Reputation score after the update.
    new_reputation: int
    # Whether a further analysis step should run — presumably gates the
    # moderation pipeline; verify against callers.
    should_analyze: bool

42 

43 

@dataclass(frozen=True)
class ModerationAssessment:
    """Immutable result of a single moderation evaluation."""

    # Moderation score; range is not enforced here (the category weightings
    # suggest a 0.0-1.0 scale — confirm with the producer).
    score: float
    # Coarse verdict: Safe / Review / Flagged.
    moderation_type: ModerationType
    # Specific content category, when one was assigned; None otherwise.
    moderation_category: ModerationCategory | None

49 

50 

@dataclass(frozen=True)
class ModerationOutcome:
    """Immutable final outcome of moderating a piece of content."""

    # Decision produced by the moderation flow (generated model type).
    decision: ModerationDecision
    # Reputation score after this outcome was applied.
    new_reputation: int
    # Detailed assessment, when one was performed; None when skipped.
    assessment: ModerationAssessment | None = None

    @property
    def is_approved(self) -> bool:
        """Convenience passthrough to the decision's approval flag."""
        return self.decision.is_approved

60 

61 

class ModerationType(StrEnum):
    """Coarse moderation verdict for a piece of content.

    The string values look like serialised labels shared with other
    systems — do not rename without checking consumers.
    """

    SAFE = "Safe"
    REVIEW = "Review"
    FLAGGED = "Flagged"

66 

67 

68class ModerationCategory(StrEnum): 

69 TOXIC = "Toxic" 

70 INSULT = "Insult" 

71 PROFANITY = "Profanity" 

72 DEROGATORY = "Derogatory" 

73 SEXUAL = "Sexual" 

74 DEATH_HARM_TRAGEDY = "Death, Harm & Tragedy" 

75 VIOLENT = "Violent" 

76 FIREARMS_WEAPONS = "Firearms & Weapons" 

77 PUBLIC_SAFETY = "Public Safety" 

78 HEALTH = "Health" 

79 RELIGION_BELIEF = "Religion & Belief" 

80 ILLICIT_DRUGS = "Illicit Drugs" 

81 WAR_CONFLICT = "War & Conflict" 

82 POLITICS = "Politics" 

83 FINANCE = "Finance" 

84 LEGAL = "Legal" 

85 

86 @staticmethod 

87 def from_string(value: str) -> ModerationCategory: 

88 try: 

89 return ModerationCategory(value) 

90 except ValueError as err: 

91 msg = f"Invalid ModerationCategory: {value}" 

92 raise ValueError(msg) from err 

93 

94 @property 

95 def label(self) -> str: 

96 return self.value 

97 

98 @property 

99 def weighting(self) -> float: 

100 weightings = { 

101 # death/sexual violence get highest weightings 

102 ModerationCategory.DEATH_HARM_TRAGEDY: 1.0, 

103 ModerationCategory.SEXUAL: 0.8, 

104 ModerationCategory.TOXIC: 0.4, 

105 ModerationCategory.VIOLENT: 0.4, 

106 ModerationCategory.FIREARMS_WEAPONS: 0.3, 

107 ModerationCategory.INSULT: 0.1, 

108 ModerationCategory.DEROGATORY: 0.1, 

109 # unused so far 

110 ModerationCategory.PROFANITY: 0.0, 

111 ModerationCategory.PUBLIC_SAFETY: 0.0, 

112 ModerationCategory.HEALTH: 0.0, 

113 ModerationCategory.RELIGION_BELIEF: 0.0, 

114 ModerationCategory.ILLICIT_DRUGS: 0.0, 

115 ModerationCategory.WAR_CONFLICT: 0.0, 

116 ModerationCategory.POLITICS: 0.0, 

117 ModerationCategory.FINANCE: 0.0, 

118 ModerationCategory.LEGAL: 0.0, 

119 } 

120 return weightings.get(self, 0.0)