mcmas.logic.complexity
A collection of (very unsophisticated!) heuristics for extracting metadata from logical expressions, including details about their time/space complexity. Consider this a placeholder for something smarter ;) The weights in particular are suggested by AI, and NOT based on a close reading of all the literature on theoretical analysis.
1""" 2mcmas.logic.complexity. 3 4A collection of (very unsophisticated!) heuristics for extracting 5metadata from logical expressions, including details about their 6time/space complexity. Consider this is a placeholder for 7something smarter ;) Weights in particular are suggested by AI, 8and NOT based on close reading of all the literature on 9theoretical analysis. 10""" 11 12import math 13import re 14 15from mcmas import typing 16from mcmas.models.spec import * # noqa 17 18# Basic propositional logic 19PROPOSITIONAL = 0.1 20 21# Temporal operators (CTL/LTL) 22TEMPORAL_BASIC = 0.3 # X, F, G 23TEMPORAL_UNTIL = 0.4 # U (more complex) 24 25# Path quantifiers 26PATH_QUANTIFIER = 0.2 # A, E 27 28# Epistemic operators 29KNOWLEDGE_SINGLE = 0.5 # K(i, φ) - PSPACE 30KNOWLEDGE_GROUP = 0.6 # GK(G, φ) 31KNOWLEDGE_DISTRIBUTED = 0.7 # DK(G, φ) 32COMMON_KNOWLEDGE = 0.9 # GCK(G, φ) - EXPSPACE, approaching undecidable 33 34# Strategic operators (ATL) 35STRATEGIC_BASIC = 0.6 # <group>X, <group>F, <group>G 36STRATEGIC_COMPLEX = 0.8 # Complex coalition strategies 37 38# Deontic logic 39OBLIGATION = 0.2 # O(φ) 40 41# Nesting penalty (exponential growth) 42NESTING_BASE = 1.3 43MAX_REASONABLE_DEPTH = 10 44 45 46# Regex patterns for different operator types 47PATTERNS = { 48 # Temporal operators 49 "temporal_basic": r"[XFG]\s*\(", 50 "temporal_until": r"U\s*\(", 51 # Path quantifiers 52 "path_quantifier": r"[AE]\s*\(", 53 # CTL combinations 54 "ctl_always_global": r"AG\s*\(", 55 "ctl_eventually_possible": r"EF\s*\(", 56 "ctl_always_next": r"AX\s*\(", 57 "ctl_always_eventually": r"AF\s*\(", 58 "ctl_possibly_always": r"EG\s*\(", 59 # Epistemic operators 60 "knowledge_single": r"K\s*\(\s*\w+\s*,", 61 "knowledge_group": r"GK\s*\(\s*\{[^}]+\}\s*,", 62 "knowledge_distributed": r"DK\s*\(\s*\{[^}]+\}\s*,", 63 "common_knowledge": r"GCK\s*\(.*\)", 64 # Strategic operators (ATL) 65 "strategic": r"<[^>]+>[XFG]\s*\(", 66 # Deontic operators 67 "obligation": r"O\s*\(", 68 # Propositional 69 "propositional": r"[a-z]\w*(?!\s*\()", 70} 71 72 73class ExpressionAnalyzer: 74 75 def extract_agents_and_groups(self, expression: str) -> typing.Tuple[int, int, int]: 76 """ 77 Extract number of agents, groups, and coalition size. 78 """ 79 agents = set() 80 groups = [] 81 max_coalition_size = 0 82 83 # Find individual agents in K(agent, ...) 
84 agent_matches = re.findall(r"K\s*\(\s*(\w+)\s*,", expression) 85 agents.update(agent_matches) 86 87 # Find groups in GK({...}), DK({...}), GCK({...}) 88 group_matches = re.findall(r"[GD]?[CK]+\s*\(\s*\{([^}]+)\}", expression) 89 for group_str in group_matches: 90 group_agents = [a.strip() for a in group_str.split(",")] 91 groups.append(group_agents) 92 agents.update(group_agents) 93 max_coalition_size = max(max_coalition_size, len(group_agents)) 94 95 # Find strategic coalitions <...> 96 coalition_matches = re.findall(r"<([^>]+)>", expression) 97 for coalition_str in coalition_matches: 98 coalition_agents = [a.strip() for a in coalition_str.split(",")] 99 agents.update(coalition_agents) 100 max_coalition_size = max(max_coalition_size, len(coalition_agents)) 101 102 return len(agents), len(groups), max_coalition_size 103 104 def calculate_nesting_coefficient(self, expression: str) -> int: 105 """Calculate maximum nesting depth of operators 106 Simplified approach: count maximum parentheses nesting 107 """ 108 max_depth = 0 109 current_depth = 0 110 111 for char in expression: 112 if char == "(": 113 current_depth += 1 114 max_depth = max(max_depth, current_depth) 115 elif char == ")": 116 current_depth -= 1 117 118 return max_depth 119 120 def count_operator_occurrences(self, expression: str) -> OperatorStatistics: 121 """ 122 Count occurrences of each operator type. 123 """ 124 op_stats = OperatorStatistics() 125 for op_type, pattern in PATTERNS.items(): 126 matches = re.findall(pattern, expression, re.IGNORECASE) 127 setattr(op_stats, op_type, len(matches)) 128 return op_stats 129 130 def calculate_base_complexity( 131 self, op_stats: typing.Dict[str, int], num_agents: int, max_coalition: int 132 ) -> float: 133 """ 134 Calculate base complexity score from operator counts. 
135 """ 136 score = 0.0 137 138 # Temporal operators 139 score += op_stats.temporal_basic * TEMPORAL_BASIC 140 score += op_stats.temporal_until * TEMPORAL_UNTIL 141 142 # Path quantifiers 143 score += op_stats.path_quantifier * PATH_QUANTIFIER 144 145 # CTL combinations (higher weight than individual components) 146 ctl_ops = [ 147 "ctl_always_global", 148 "ctl_eventually_possible", 149 "ctl_always_next", 150 "ctl_always_eventually", 151 "ctl_possibly_always", 152 ] 153 for op in ctl_ops: 154 score += getattr(op_stats, op) * (TEMPORAL_BASIC + PATH_QUANTIFIER) 155 156 # Epistemic operators (scaled by number of agents) 157 agent_factor = 1 + math.log2(max(1, num_agents)) 158 score += op_stats.knowledge_single * KNOWLEDGE_SINGLE * agent_factor 159 score += op_stats.knowledge_group * KNOWLEDGE_GROUP * agent_factor 160 score += op_stats.knowledge_distributed * KNOWLEDGE_DISTRIBUTED * agent_factor 161 162 # Common knowledge (exponential in agent count, capped near 1.0) 163 ck_count = op_stats.common_knowledge 164 if ck_count > 0: 165 ck_complexity = COMMON_KNOWLEDGE * (1 + num_agents * 0.05) 166 score += ck_count * min(ck_complexity, 0.95) 167 168 # Strategic operators (exponential in coalition size) 169 strategic_count = op_stats.strategic 170 if strategic_count > 0: 171 coalition_factor = 1 + math.log2(max(1, max_coalition)) 172 strategic_complexity = STRATEGIC_BASIC * coalition_factor 173 score += strategic_count * min(strategic_complexity, 0.85) 174 175 # Deontic operators 176 score += op_stats.obligation * OBLIGATION 177 178 # Propositional variables (minimal complexity) 179 score += op_stats.propositional * PROPOSITIONAL 180 181 return score 182 183 def apply_nesting_penalty(self, base_score: float, depth: int) -> float: 184 """ 185 Apply exponential penalty for nesting depth. 186 """ 187 if depth <= 1: 188 return base_score 189 190 # Exponential growth with depth, but bounded 191 depth_factor = min( 192 math.pow(NESTING_BASE, depth - 1), 10.0 # Cap the multiplier 193 ) 194 195 return base_score * depth_factor 196 197 @staticmethod 198 def normalize_score(raw_score: float) -> float: 199 """ 200 Normalize score to [0, 1] range using sigmoid-like 201 function. 202 """ 203 # Use a sigmoid function to map [0, ∞) to [0, 1) 204 # The function approaches 1 as scores get very high 205 return 1 - math.exp(-raw_score) 206 207 def analyze(self, expression: str, index: int = -1): 208 """ 209 Main function to analyze logical expression complexity. 
210 211 Args: 212 expression: String containing logical formula 213 214 Returns: 215 Tuple of (complexity_score, analysis_details) 216 - complexity_score: Float between 0 and 1 217 - analysis_details: Dict with breakdown of analysis 218 """ 219 if not expression or not expression.strip(): 220 return 0.0, {"error": "Empty expression"} 221 222 # Clean and normalize expression 223 expression = expression.strip() 224 225 # Extract structural information 226 num_agents, num_groups, max_coalition = self.extract_agents_and_groups( 227 expression 228 ) 229 nesting_coefficient = self.calculate_nesting_coefficient(expression) 230 opstats = self.count_operator_occurrences(expression) 231 232 # Calculate base complexity 233 base_complexity = self.calculate_base_complexity( 234 opstats, num_agents, max_coalition 235 ) 236 237 # Apply nesting penalty 238 raw_score = self.apply_nesting_penalty(base_complexity, nesting_coefficient) 239 240 # Normalize to [0, 1] 241 final_score = self.normalize_score(raw_score) 242 243 # Prepare analysis details 244 from mcmas import fmtk 245 from mcmas.models import spec # logic import complexity 246 247 return spec.ComplexityAnalysis( 248 score=final_score, 249 formula=expression, 250 metadata=fmtk.AnalysisMeta( 251 formula_index=index, 252 base_complexity=base_complexity, 253 nesting_coefficient=nesting_coefficient, 254 raw_score=raw_score, 255 ).model_dump(exclude_unset=True), 256 statistics=ComplexityStats( 257 coalitions=CoalitionStats( 258 max_coalition_size=max_coalition, 259 num_agents=num_agents, 260 num_groups=num_groups, 261 ).model_dump(), 262 operators=opstats.model_dump(), 263 ).model_dump(), 264 ) 265 266 __call__ = analyze 267 268 269analyzer = ExpressionAnalyzer()
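To make the pattern table above concrete, here is a minimal sketch of how two of the regexes behave. The formula and agent names are arbitrary examples, and the import path assumes the module layout documented above.

```python
import re

from mcmas.logic.complexity import PATTERNS

formula = "K(alice, <alice,bob>F(goal))"

# Single-agent knowledge: matches the "K(agent," prefix.
re.findall(PATTERNS["knowledge_single"], formula)  # ['K(alice,']

# Strategic operator: matches a coalition followed by X, F, or G.
re.findall(PATTERNS["strategic"], formula)  # ['<alice,bob>F(']
```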
class ExpressionAnalyzer
```python
    def extract_agents_and_groups(self, expression: str) -> typing.Tuple[int, int, int]:
        """
        Extract number of agents, groups, and coalition size.
        """
        agents = set()
        groups = []
        max_coalition_size = 0

        # Find individual agents in K(agent, ...)
        agent_matches = re.findall(r"K\s*\(\s*(\w+)\s*,", expression)
        agents.update(agent_matches)

        # Find groups in GK({...}), DK({...}), GCK({...})
        group_matches = re.findall(r"[GD]?[CK]+\s*\(\s*\{([^}]+)\}", expression)
        for group_str in group_matches:
            group_agents = [a.strip() for a in group_str.split(",")]
            groups.append(group_agents)
            agents.update(group_agents)
            max_coalition_size = max(max_coalition_size, len(group_agents))

        # Find strategic coalitions <...>
        coalition_matches = re.findall(r"<([^>]+)>", expression)
        for coalition_str in coalition_matches:
            coalition_agents = [a.strip() for a in coalition_str.split(",")]
            agents.update(coalition_agents)
            max_coalition_size = max(max_coalition_size, len(coalition_agents))

        return len(agents), len(groups), max_coalition_size
```
Extract the number of agents, the number of groups, and the maximum coalition size.
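A quick illustrative call; the agent names are arbitrary, and the import assumes the module-level `analyzer` instance shown in the source above.

```python
from mcmas.logic.complexity import analyzer

# Three distinct agents, one GK group of size 2, one strategic coalition of size 3.
counts = analyzer.extract_agents_and_groups(
    "GK({alice, bob}, K(carol, p)) and <alice,bob,carol>F(q)"
)
assert counts == (3, 1, 3)  # (num_agents, num_groups, max_coalition_size)
```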
```python
    def calculate_nesting_coefficient(self, expression: str) -> int:
        """Calculate maximum nesting depth of operators
        Simplified approach: count maximum parentheses nesting
        """
        max_depth = 0
        current_depth = 0

        for char in expression:
            if char == "(":
                current_depth += 1
                max_depth = max(max_depth, current_depth)
            elif char == ")":
                current_depth -= 1

        return max_depth
```
Calculate the maximum nesting depth of operators. Simplified approach: count the maximum parentheses nesting.
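Because the heuristic simply tracks parenthesis depth, operator arguments and ordinary grouping parentheses count alike. A small sketch:

```python
from mcmas.logic.complexity import analyzer

analyzer.calculate_nesting_coefficient("AG(K(alice, EF(p)))")  # -> 3
analyzer.calculate_nesting_coefficient("p and q")              # -> 0 (no parentheses at all)
```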
```python
    def count_operator_occurrences(self, expression: str) -> OperatorStatistics:
        """
        Count occurrences of each operator type.
        """
        op_stats = OperatorStatistics()
        for op_type, pattern in PATTERNS.items():
            matches = re.findall(pattern, expression, re.IGNORECASE)
            setattr(op_stats, op_type, len(matches))
        return op_stats
```
Count occurrences of each operator type.
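An illustrative count, assuming `OperatorStatistics` exposes the PATTERNS keys as attributes (as the `setattr` calls above suggest). Note that the regexes intentionally overlap: the `G(` inside `AG(` is also tallied as a basic temporal operator.

```python
from mcmas.logic.complexity import analyzer

stats = analyzer.count_operator_occurrences("AG(K(alice, p))")
stats.ctl_always_global  # 1 -- the "AG(" prefix
stats.knowledge_single   # 1 -- "K(alice,"
stats.temporal_basic     # 1 -- the trailing "G(" of "AG(" also matches [XFG]\s*\(
```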
```python
    def calculate_base_complexity(
        self, op_stats: OperatorStatistics, num_agents: int, max_coalition: int
    ) -> float:
        """
        Calculate base complexity score from operator counts.
        """
        score = 0.0

        # Temporal operators
        score += op_stats.temporal_basic * TEMPORAL_BASIC
        score += op_stats.temporal_until * TEMPORAL_UNTIL

        # Path quantifiers
        score += op_stats.path_quantifier * PATH_QUANTIFIER

        # CTL combinations (higher weight than individual components)
        ctl_ops = [
            "ctl_always_global",
            "ctl_eventually_possible",
            "ctl_always_next",
            "ctl_always_eventually",
            "ctl_possibly_always",
        ]
        for op in ctl_ops:
            score += getattr(op_stats, op) * (TEMPORAL_BASIC + PATH_QUANTIFIER)

        # Epistemic operators (scaled by number of agents)
        agent_factor = 1 + math.log2(max(1, num_agents))
        score += op_stats.knowledge_single * KNOWLEDGE_SINGLE * agent_factor
        score += op_stats.knowledge_group * KNOWLEDGE_GROUP * agent_factor
        score += op_stats.knowledge_distributed * KNOWLEDGE_DISTRIBUTED * agent_factor

        # Common knowledge (exponential in agent count, capped near 1.0)
        ck_count = op_stats.common_knowledge
        if ck_count > 0:
            ck_complexity = COMMON_KNOWLEDGE * (1 + num_agents * 0.05)
            score += ck_count * min(ck_complexity, 0.95)

        # Strategic operators (exponential in coalition size)
        strategic_count = op_stats.strategic
        if strategic_count > 0:
            coalition_factor = 1 + math.log2(max(1, max_coalition))
            strategic_complexity = STRATEGIC_BASIC * coalition_factor
            score += strategic_count * min(strategic_complexity, 0.85)

        # Deontic operators
        score += op_stats.obligation * OBLIGATION

        # Propositional variables (minimal complexity)
        score += op_stats.propositional * PROPOSITIONAL

        return score
```
Calculate base complexity score from operator counts.
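The epistemic weighting above scales logarithmically with the number of distinct agents. For example, assuming four agents (the weight is the module constant shown earlier):

```python
import math

KNOWLEDGE_SINGLE = 0.5  # weight from the module constants above

agent_factor = 1 + math.log2(max(1, 4))         # 1 + 2 = 3.0 for four agents
per_operator = KNOWLEDGE_SINGLE * agent_factor  # each K(i, φ) adds 0.5 * 3.0 = 1.5
```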
```python
    def apply_nesting_penalty(self, base_score: float, depth: int) -> float:
        """
        Apply exponential penalty for nesting depth.
        """
        if depth <= 1:
            return base_score

        # Exponential growth with depth, but bounded
        depth_factor = min(
            math.pow(NESTING_BASE, depth - 1), 10.0  # Cap the multiplier
        )

        return base_score * depth_factor
```
Apply exponential penalty for nesting depth.
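The multiplier grows as NESTING_BASE ** (depth - 1) and is capped at 10. With the default base of 1.3, the cap kicks in at depth 10:

```python
import math

NESTING_BASE = 1.3  # from the module constants above

min(math.pow(NESTING_BASE, 5 - 1), 10.0)   # ≈ 2.86 for depth 5
min(math.pow(NESTING_BASE, 10 - 1), 10.0)  # 10.0 -- 1.3**9 ≈ 10.6, so the cap applies
```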
```python
    @staticmethod
    def normalize_score(raw_score: float) -> float:
        """
        Normalize score to [0, 1] range using sigmoid-like
        function.
        """
        # Use a sigmoid function to map [0, ∞) to [0, 1)
        # The function approaches 1 as scores get very high
        return 1 - math.exp(-raw_score)
```
Normalize the score to the [0, 1) range using a sigmoid-like function.
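A few sample values of the mapping 1 - exp(-raw_score):

```python
import math

1 - math.exp(-0.5)   # ≈ 0.39
1 - math.exp(-3.0)   # ≈ 0.95
1 - math.exp(-10.0)  # ≈ 0.99995 -- approaches, but never reaches, 1.0
```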
```python
    def analyze(self, expression: str, index: int = -1):
        """
        Main function to analyze logical expression complexity.

        Args:
            expression: String containing the logical formula
            index: Optional formula index recorded in the analysis metadata

        Returns:
            spec.ComplexityAnalysis with the normalized score, the formula,
            analysis metadata, and operator/coalition statistics.  An empty
            expression short-circuits to (0.0, {"error": "Empty expression"}).
        """
        if not expression or not expression.strip():
            return 0.0, {"error": "Empty expression"}

        # Clean and normalize expression
        expression = expression.strip()

        # Extract structural information
        num_agents, num_groups, max_coalition = self.extract_agents_and_groups(
            expression
        )
        nesting_coefficient = self.calculate_nesting_coefficient(expression)
        opstats = self.count_operator_occurrences(expression)

        # Calculate base complexity
        base_complexity = self.calculate_base_complexity(
            opstats, num_agents, max_coalition
        )

        # Apply nesting penalty
        raw_score = self.apply_nesting_penalty(base_complexity, nesting_coefficient)

        # Normalize to [0, 1]
        final_score = self.normalize_score(raw_score)

        # Prepare analysis details
        from mcmas import fmtk
        from mcmas.models import spec

        return spec.ComplexityAnalysis(
            score=final_score,
            formula=expression,
            metadata=fmtk.AnalysisMeta(
                formula_index=index,
                base_complexity=base_complexity,
                nesting_coefficient=nesting_coefficient,
                raw_score=raw_score,
            ).model_dump(exclude_unset=True),
            statistics=ComplexityStats(
                coalitions=CoalitionStats(
                    max_coalition_size=max_coalition,
                    num_agents=num_agents,
                    num_groups=num_groups,
                ).model_dump(),
                operators=opstats.model_dump(),
            ).model_dump(),
        )

    __call__ = analyze
```
Main function to analyze logical expression complexity.
Args:
    expression: String containing the logical formula.
    index: Optional formula index, recorded in the analysis metadata.

Returns:
    A spec.ComplexityAnalysis carrying the normalized score (a float in [0, 1)), the formula, analysis metadata (base complexity, nesting coefficient, raw score), and operator/coalition statistics. An empty or whitespace-only expression short-circuits to (0.0, {"error": "Empty expression"}).
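Finally, an end-to-end sketch. It assumes the package imports as documented above and that spec.ComplexityAnalysis exposes its fields as attributes (as the constructor call in the source suggests).

```python
from mcmas.logic.complexity import analyzer

# __call__ is an alias for analyze, so the instance can be used directly.
result = analyzer("AG(K(alice, p) -> <alice,bob>F(q))", index=0)

result.score       # normalized complexity, a float in [0, 1)
result.formula     # the stripped input formula
result.metadata    # formula_index, base_complexity, nesting_coefficient, raw_score
result.statistics  # operator counts plus agent/group/coalition tallies
```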