From fb67acc55e7f3a715899d6696bd4c50da080e0dc Mon Sep 17 00:00:00 2001
From: Arity-T
Date: Wed, 21 May 2025 19:34:08 +0300
Subject: [PATCH] Semantic actions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 lab5/programm/grammar.py | 36 +++++++++++++++++++++++++++++++-
 lab5/programm/main.py    | 45 ++++++++++++++++++++--------------------
 2 files changed, 57 insertions(+), 24 deletions(-)

diff --git a/lab5/programm/grammar.py b/lab5/programm/grammar.py
index 3a4d7dd..85302b2 100644
--- a/lab5/programm/grammar.py
+++ b/lab5/programm/grammar.py
@@ -1,6 +1,7 @@
 import random
 import re
 from collections import OrderedDict
+from typing import Callable
 
 from prettytable import PrettyTable
 
@@ -8,7 +9,11 @@ from prettytable import PrettyTable
 class Grammar:
     EPSILON: str = "epsilon"
 
-    def __init__(self, text: str):
+    def __init__(
+        self,
+        text: str,
+        semantic_action: Callable[[int, tuple[str, list[str]]], None] | None = None,
+    ):
         self.productions: OrderedDict[str, list[list[str]]] = OrderedDict()
         self.start_symbol: str = ""
         self._parse_productions(text)
@@ -33,6 +38,9 @@
                 self.rule_numbers[(nt, tuple(rule))] = rule_idx
                 rule_idx += 1
 
+        # Semantic action callback, invoked on every rule application
+        self.semantic_action = semantic_action
+
     def _parse_productions(self, text: str):
         for line in text.splitlines():
             line = line.strip()
@@ -309,6 +317,10 @@ class Grammar:
                 rule_number = self.rule_numbers[(top, tuple(production))]
                 rules_applied.append(rule_number)
 
+                # Execute semantic action if provided
+                if self.semantic_action:
+                    self.semantic_action(rule_number, (top, production))
+
                 # Push the production onto the stack in reverse order
                 for symbol in reversed(production):
                     stack.append(symbol)
@@ -399,3 +411,25 @@ class Grammar:
             steps.append(current)
 
         return steps
+
+
+class ActionsList:
+    def __init__(self, actions: list[Callable[[int, tuple[str, list[str]]], None]]):
+        self.actions = actions
+
+    def __call__(self, rule_number: int, rule_tuple: tuple[str, list[str]]) -> None:
+        self.actions[rule_number - 1](rule_number, rule_tuple)
+
+
+class ActionsListWithAppliedCount:
+    def __init__(
+        self, actions: list[Callable[[int, int, tuple[str, list[str]]], None]]
+    ):
+        self.actions = actions
+        self.applied_counters = [0] * len(actions)
+
+    def __call__(self, rule_number: int, rule_tuple: tuple[str, list[str]]) -> None:
+        self.applied_counters[rule_number - 1] += 1
+        self.actions[rule_number - 1](
+            rule_number, self.applied_counters[rule_number - 1], rule_tuple
+        )
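The two dispatcher classes added to grammar.py share one convention: Grammar numbers productions from 1 in declaration order, so each dispatcher indexes its action list with rule_number - 1. Below is a minimal sketch of the callback protocol, assuming the patched grammar.py is importable; the toy rules and messages are illustrative only and not part of the patch:

    from grammar import ActionsList

    # One action per production, in grammar declaration order. Each action
    # receives the 1-based rule number and the (nonterminal, production)
    # pair that the parser has just expanded.
    actions = ActionsList([
        lambda n, rule: print(f"rule {n}: {rule[0]} -> {' '.join(rule[1])}"),
        lambda n, rule: print(f"rule {n}: nothing to emit"),
    ])

    # Grammar.analyze() invokes the dispatcher like this on every rule application:
    actions(1, ("S", ["a", "S"]))   # prints: rule 1: S -> a S
    actions(2, ("S", ["epsilon"]))  # prints: rule 2: nothing to emit

ActionsListWithAppliedCount follows the same pattern but additionally passes a per-rule application counter, which main.py below uses to report how many times each rule has fired.
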
diff --git a/lab5/programm/main.py b/lab5/programm/main.py
index 973732e..fcdff2a 100644
--- a/lab5/programm/main.py
+++ b/lab5/programm/main.py
@@ -1,11 +1,30 @@
-from grammar import Grammar
+from grammar import ActionsListWithAppliedCount, Grammar
 
 
 def load_grammar(filename: str = "grammar.txt") -> Grammar | None:
     try:
+        # Emit JVM-style bytecode for: #b - 2 * #a + 3 * #c
+        actions = [
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_1"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_2 isub"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_1 iadd"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_1 iadd"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_3 iadd"
+            ),
+        ]
+
         with open(filename, "r", encoding="utf-8") as file:
             text = file.read()
-        grammar = Grammar(text)
+        grammar = Grammar(text, ActionsListWithAppliedCount(actions))
 
         # Save grammar information to files
         with open("grammar_rules.txt", "w", encoding="utf-8") as output_file:
@@ -40,7 +59,6 @@
 
 
 def tokenize_string(input_string: str) -> list[str]:
-    input_string = input_string.replace(",", " , ").replace(".", " . ")
     return input_string.split()
 
 
@@ -52,16 +70,7 @@ def check_string(grammar: Grammar | None, input_string: str) -> None:
     print(f"Checking string: '{input_string}'")
     try:
         input_tokens = tokenize_string(input_string)
-
-        if not input_tokens:
-            parse_result = grammar.analyze(input_tokens)
-        else:
-            try:
-                input_tokens[0] = input_tokens[0][0].lower() + input_tokens[0][1:]
-                parse_result = grammar.analyze(input_tokens)
-            except ValueError as e:
-                input_tokens[0] = input_tokens[0][0].upper() + input_tokens[0][1:]
-                parse_result = grammar.analyze(input_tokens)
+        parse_result = grammar.analyze(input_tokens)
 
         print(f"Result: the string matches the grammar")
         print(f"Applied rules: {parse_result}")
@@ -86,15 +95,6 @@ def check_string(grammar: Grammar | None, input_string: str) -> None:
         print(f"An error occurred during analysis: {e}")
 
 
-def post_process_string(string: str) -> str:
-    if string:
-        string = string[0].upper() + string[1:]
-    string = string.replace(" ,", ",")
-    string = string.replace(" .", ".")
-    string = string.replace(",.", ".")
-    return string
-
-
 def generate_string(grammar: Grammar | None) -> None:
     if not grammar:
         print("Error: the grammar is not loaded")
@@ -103,7 +103,6 @@
     try:
         terminals, rules = grammar.generate()
         generated_string = " ".join(terminals)
-        generated_string = post_process_string(generated_string)
 
         print(f"Generated string: {generated_string}")
         print(f"Applied rules: {rules}")
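
Because Grammar accepts any callable of type Callable[[int, tuple[str, list[str]]], None] as its semantic_action, the actions are not limited to printing. The sketch below collects the emitted instructions instead, under the same assumptions as main.py; the grammar file contents and the input token sequence are placeholders, since grammar.txt is not part of this patch:

    from grammar import ActionsListWithAppliedCount, Grammar

    # Accumulate emitted instructions so the full bytecode listing can be
    # assembled after analyze() finishes, instead of printing rule by rule.
    emitted: list[str] = []

    def emit(code: str):
        # Build an action that appends `code` on every application of its rule.
        return lambda rule_number, applied_count, _: emitted.append(code)

    actions = ActionsListWithAppliedCount([
        emit("iconst_1"),
        emit("iconst_2 isub"),
        emit("iconst_1 iadd"),
        emit("iconst_1 iadd"),
        emit("iconst_3 iadd"),
    ])

    with open("grammar.txt", encoding="utf-8") as f:  # hypothetical grammar file
        grammar = Grammar(f.read(), actions)

    grammar.analyze("b a c".split())  # placeholder token sequence
    print(" ".join(emitted))

Because the parser applies productions top-down, popping the stack top and pushing each production in reverse, the collected instructions come out in exactly the order the print-based actions in main.py report them.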