Semantic actions
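This change wires a list of semantic actions into grammar loading: each grammar rule gets a callback that, when the rule fires, prints a JVM-style instruction snippet (iconst_1, iconst_2 isub, iconst_1 iadd, ...). Judging by the comment above the list, the emitted instruction stream evaluates #b - 2 * #a + 3 * #c over the parsed input. The same commit simplifies check_string by dropping its case-flipping retry and deletes the post_process_string cleanup pass along with its only call site.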
@@ -1,11 +1,30 @@
-from grammar import Grammar
+from grammar import ActionsListWithAppliedCount, Grammar
 
 
 def load_grammar(filename: str = "grammar.txt") -> Grammar | None:
     try:
+        # #b - 2 * #a + 3 * #c
+        actions = [
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_1"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_2 isub"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_1 iadd"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_1 iadd"
+            ),
+            lambda rule_number, applied_count, _: print(
+                f"Rule #{rule_number} (applied x{applied_count} times): iconst_3 iadd"
+            ),
+        ]
+
         with open(filename, "r", encoding="utf-8") as file:
             text = file.read()
-        grammar = Grammar(text)
+        grammar = Grammar(text, ActionsListWithAppliedCount(actions))
 
         # Save the grammar information to files
         with open("grammar_rules.txt", "w", encoding="utf-8") as output_file:
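grammar.py itself is outside this diff, so the definition of ActionsListWithAppliedCount is not shown here. A minimal sketch consistent with its use above (a wrapper over three-argument callables taking the rule number, the running count of that rule's applications, and a payload the lambdas ignore) might look like this; the apply method name, the 1-based rule numbering, and the payload parameter are assumptions, not the repository's actual API:

    from collections.abc import Callable

    Action = Callable[[int, int, object], None]

    class ActionsListWithAppliedCount:
        """Sketch only: one callback per grammar rule, with a per-rule fire counter."""

        def __init__(self, actions: list[Action]) -> None:
            self._actions = actions
            self._applied_counts = [0] * len(actions)  # times each rule has fired

        def apply(self, rule_number: int, payload: object = None) -> None:
            # Rule numbers assumed 1-based, matching the printed "Rule #1 ..." messages.
            index = rule_number - 1
            self._applied_counts[index] += 1
            self._actions[index](rule_number, self._applied_counts[index], payload)

Under this reading, grammar.analyze would call actions.apply(rule_number) each time it applies a rule, which is what advances the per-rule applied_count seen in the printed messages.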
@@ -40,7 +59,6 @@ def load_grammar(filename: str = "grammar.txt") -> Grammar | None:
-
 
 def tokenize_string(input_string: str) -> list[str]:
     input_string = input_string.replace(",", " , ").replace(".", " . ")
     return input_string.split()
 
 
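tokenize_string pads commas and periods with spaces before splitting, so punctuation comes out as standalone tokens:

    >>> tokenize_string("Hello, world.")
    ['Hello', ',', 'world', '.']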
@@ -52,16 +70,7 @@ def check_string(grammar: Grammar | None, input_string: str) -> None:
     print(f"Checking string: '{input_string}'")
     try:
         input_tokens = tokenize_string(input_string)
-
-        if not input_tokens:
-            parse_result = grammar.analyze(input_tokens)
-        else:
-            try:
-                input_tokens[0] = input_tokens[0][0].lower() + input_tokens[0][1:]
-                parse_result = grammar.analyze(input_tokens)
-            except ValueError as e:
-                input_tokens[0] = input_tokens[0][0].upper() + input_tokens[0][1:]
-                parse_result = grammar.analyze(input_tokens)
+        parse_result = grammar.analyze(input_tokens)
 
         print("Result: the string matches the grammar")
         print(f"Applied rules: {parse_result}")
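The deleted branch used to retry a failed parse with the first token's case flipped (lowercased first, then recapitalized on ValueError); after this commit the tokens go to grammar.analyze exactly once, so the input's case must already match the grammar. From the surrounding code, analyze takes the token list, returns the record of applied rules, and raises ValueError on a mismatch, roughly:

    tokens = tokenize_string("some input .")  # placeholder input, not from the repo
    try:
        applied_rules = grammar.analyze(tokens)
        print(f"Applied rules: {applied_rules}")
    except ValueError as error:
        print(f"String rejected: {error}")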
@@ -86,15 +95,6 @@ def check_string(grammar: Grammar | None, input_string: str) -> None:
         print(f"An error occurred during analysis: {e}")
 
 
-def post_process_string(string: str) -> str:
-    if string:
-        string = string[0].upper() + string[1:]
-    string = string.replace(" ,", ",")
-    string = string.replace(" .", ".")
-    string = string.replace(",.", ".")
-    return string
-
-
 def generate_string(grammar: Grammar | None) -> None:
     if not grammar:
         print("Error: the grammar is not loaded")
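For reference, the deleted helper capitalized the first character and tightened the spacing around punctuation:

    >>> post_process_string("hello , world .")
    'Hello, world.'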
@@ -103,7 +103,6 @@ def generate_string(grammar: Grammar | None) -> None:
     try:
         terminals, rules = grammar.generate()
         generated_string = " ".join(terminals)
-        generated_string = post_process_string(generated_string)
         print(f"Generated string: {generated_string}")
         print(f"Applied rules: {rules}")
 
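grammar.generate() is used here as returning a pair (terminals, rules): the terminal symbols of a generated derivation and the rules applied to produce it. Since post_process_string is no longer called, the printed string is the raw space-joined terminal list: a hypothetical result ['hello', ',', 'world', '.'] now prints as 'hello , world .' rather than 'Hello, world.'.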