
Commit a259abf

refactor: improve MikroTik traceroute output cleaning and aggregation logic
1 parent bac2bc1

File tree

1 file changed: +111 -57 lines

hyperglass/plugins/_builtin/mikrotik_garbage_output.py

Lines changed: 111 additions & 57 deletions
@@ -41,78 +41,132 @@ def _clean_traceroute_output(self, raw_output: str) -> str:
             return ""

         lines = raw_output.splitlines()
-        cleaned_lines = []
-        found_header = False
-        data_lines = []
+        # Remove command echoes and paging, keep only header markers and data lines
+        # We'll split the output into discrete tables (each table begins at a header)
+        tables: t.List[t.List[str]] = []
+        current_table: t.List[str] = []
+        header_line: t.Optional[str] = None

         for line in lines:
             stripped = line.strip()

-            # Skip empty lines
-            if not stripped:
-                continue
-
-            # Skip interactive paging prompts
-            if "-- [Q quit|C-z pause]" in stripped or "-- [Q quit|D dump|C-z pause]" in stripped:
+            # Skip empty lines and interactive paging prompts
+            if not stripped or "-- [Q quit|C-z pause]" in stripped or "-- [Q quit|D dump|C-z pause]" in stripped:
                 continue

             # Skip command echo lines
             if "tool traceroute" in stripped:
                 continue

-            # Look for the header line (ADDRESS LOSS SENT LAST AVG BEST WORST)
+            # If this is a header line, start a new table
             if "ADDRESS" in stripped and "LOSS" in stripped and "SENT" in stripped:
-                if not found_header:
-                    cleaned_lines.append(line)
-                    found_header = True
+                header_line = line
+                # If we were collecting a table, push it
+                if current_table:
+                    tables.append(current_table)
+                    current_table = []
+                # Start collecting after header
                 continue

-            # After finding header, collect all data lines
-            if found_header and stripped:
-                data_lines.append(line)
-
-        # Process data lines to aggregate trailing timeouts
-        if data_lines:
-            processed_lines = []
-            trailing_timeout_count = 0
-
-            # Work backwards to count trailing timeouts
-            for i in range(len(data_lines) - 1, -1, -1):
-                line = data_lines[i]
-                if (
-                    "100%" in line.strip()
-                    and "timeout" in line.strip()
-                    and not line.strip().startswith(
-                        ("1", "2", "3", "4", "5", "6", "7", "8", "9", "0")
-                    )
-                ):
-                    # This is a timeout line (no IP address at start)
-                    trailing_timeout_count += 1
-                else:
-                    # Found a non-timeout line, stop counting
-                    break
-
-            # Add non-trailing lines as-is
-            non_trailing_count = len(data_lines) - trailing_timeout_count
-            processed_lines.extend(data_lines[:non_trailing_count])
-
-            # Handle trailing timeouts
-            if trailing_timeout_count > 0:
-                if trailing_timeout_count <= 3:
-                    # If 3 or fewer trailing timeouts, show them all
-                    processed_lines.extend(data_lines[non_trailing_count:])
-                else:
-                    # If more than 3 trailing timeouts, show first 2 and aggregate the rest
-                    processed_lines.extend(data_lines[non_trailing_count : non_trailing_count + 2])
-                    remaining_timeouts = trailing_timeout_count - 2
-                    # Add an aggregation line
-                    processed_lines.append(
-                        f" ... ({remaining_timeouts} more timeout hops)"
-                    )
+            # Collect data lines (will be associated with the most recent header)
+            if header_line is not None:
+                current_table.append(line)
+
+        # Push the last collected table if any
+        if current_table:
+            tables.append(current_table)
+
+        # If we didn't find any header/data, return cleaned minimal output
+        if not tables:
+            # Fallback to previous behavior: remove prompts and flags
+            filtered_lines: t.List[str] = []
+            in_flags_section = False
+            for line in lines:
+                stripped_line = line.strip()
+                if stripped_line.startswith("@") and stripped_line.endswith("] >"):
+                    continue
+                if "[Q quit|D dump|C-z pause]" in stripped_line:
+                    continue
+                if stripped_line.startswith("Flags:"):
+                    in_flags_section = True
+                    continue
+                if in_flags_section:
+                    if "=" in stripped_line:
+                        in_flags_section = False
+                    else:
+                        continue
+                filtered_lines.append(line)
+            return "\n".join(filtered_lines)

-            cleaned_lines.extend(processed_lines)
+        # Aggregate tables by hop index. For each hop position, pick the row with the
+        # highest SENT count. If SENT ties, prefer non-timeout rows and the later table.
+        processed_lines: t.List[str] = []
+
+        # Regex to extract LOSS% and SENT count following it: e.g. '0% 3'
+        sent_re = re.compile(r"(\d+)%\s+(\d+)\b")
+
+        max_rows = max(len(t) for t in tables)
+
+        for i in range(max_rows):
+            best_row = None
+            best_sent = -1
+            best_is_timeout = True
+            best_table_index = -1
+
+            for ti, table in enumerate(tables):
+                if i >= len(table):
+                    continue
+                row = table[i]
+                m = sent_re.search(row)
+                if m:
+                    try:
+                        sent = int(m.group(2))
+                    except Exception:
+                        sent = 0
+                else:
+                    sent = 0
+
+                is_timeout = "timeout" in row.lower() or ("100%" in row and "timeout" in row.lower())
+
+                # Prefer higher SENT, then prefer non-timeout, then later table (higher ti)
+                pick = False
+                if sent > best_sent:
+                    pick = True
+                elif sent == best_sent:
+                    if best_is_timeout and not is_timeout:
+                        pick = True
+                    elif (best_is_timeout == is_timeout) and ti > best_table_index:
+                        pick = True
+
+                if pick:
+                    best_row = row
+                    best_sent = sent
+                    best_is_timeout = is_timeout
+                    best_table_index = ti
+
+            if best_row is not None:
+                processed_lines.append(best_row)
+
+        # Collapse excessive trailing timeouts into an aggregation line
+        trailing_timeouts = 0
+        for line in reversed(processed_lines):
+            if ("timeout" in line.lower()) or (sent_re.search(line) and sent_re.search(line).group(1) == "100"):
+                trailing_timeouts += 1
+            else:
+                break

-        return "\n".join(cleaned_lines)
+        if trailing_timeouts > 3:
+            non_trailing = len(processed_lines) - trailing_timeouts
+            # Keep first 2 of trailing timeouts and aggregate the rest
+            aggregated = processed_lines[:non_trailing] + processed_lines[non_trailing:non_trailing + 2]
+            remaining = trailing_timeouts - 2
+            aggregated.append(f" ... ({remaining} more timeout hops)")
+            processed_lines = aggregated
+
+        # Prepend header line if we have one
+        header_to_use = header_line or "ADDRESS LOSS SENT LAST AVG BEST WORST STD-DEV STATUS"
+        cleaned = [header_to_use] + processed_lines
+        return "\n".join(cleaned)

     def process(self, *, output: OutputType, query: "Query") -> Series[str]:
         """

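The trailing-timeout collapse can be restated the same way: with more than three timeout rows at the end of the trace, keep the first two and summarize the rest. A runnable sketch with placeholder hop strings (not real traceroute rows):

# Hedged restatement of the trailing-timeout collapse from this commit.
processed = ["hop 1", "hop 2", "timeout", "timeout", "timeout", "timeout", "timeout"]
trailing = 5  # counted by scanning backwards from the end, as in the diff

if trailing > 3:
    non_trailing = len(processed) - trailing
    # Keep the first 2 trailing timeouts, then append one aggregation line
    processed = processed[:non_trailing] + processed[non_trailing : non_trailing + 2]
    processed.append(f" ... ({trailing - 2} more timeout hops)")

print("\n".join(processed))
# hop 1
# hop 2
# timeout
# timeout
#  ... (3 more timeout hops)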