This commit is contained in:
2026-05-17 20:54:53 +05:00
parent 65a0babeab
commit 27eb4fd606
90 changed files with 12343 additions and 0 deletions
+21
View File
@@ -0,0 +1,21 @@
# Backend env example
APP_ENV=dev
SECRET_KEY=change-me-to-32-bytes-random-secret
ACCESS_TOKEN_EXPIRE_MINUTES=60
REFRESH_TOKEN_EXPIRE_DAYS=14
DATABASE_URL=postgresql+psycopg2://mikrocloud:mikrocloud@postgres:5432/mikrocloud
REDIS_URL=redis://redis:6379/0
# MinIO / S3
S3_ENDPOINT=http://minio:9000
S3_ACCESS_KEY=minio
S3_SECRET_KEY=minio12345
S3_BUCKET=mikrocloud-backups
# Bootstrap admin (created automatically on first startup)
BOOTSTRAP_ADMIN_EMAIL=admin
BOOTSTRAP_ADMIN_PASSWORD=admin
# CORS
CORS_ORIGINS=http://localhost:5173,http://127.0.0.1:5173
+19
View File
@@ -0,0 +1,19 @@
# Slim Python 3.12 base keeps the image small.
FROM python:3.12-slim
# Unbuffered stdout, no .pyc files, no pip cache — standard container hygiene.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1
WORKDIR /app
# build-essential + libpq-dev are needed to build psycopg2; postgresql-client
# presumably provides pg_dump/pg_restore for controller backups — confirm.
# apt lists are removed to keep the layer slim.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential libpq-dev curl postgresql-client \
    && rm -rf /var/lib/apt/lists/*
# Install dependencies first so source changes do not invalidate this cache layer.
COPY requirements.txt .
RUN pip install --upgrade pip && pip install -r requirements.txt
COPY app ./app
EXPOSE 8000
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
View File
View File
+39
View File
@@ -0,0 +1,39 @@
from __future__ import annotations
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from ..core.db import get_db
from ..core.security import decode_token
from ..models.user import User
# Bearer-token scheme; tokenUrl points at the form-based login endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login")
def get_current_user(
    token: str = Depends(oauth2_scheme),
    db: Session = Depends(get_db),
) -> User:
    """Resolve the authenticated user from a bearer access token.

    Raises HTTP 401 when the token is invalid, is not an "access" token,
    carries a malformed subject, or the user is missing/inactive.
    """
    try:
        payload = decode_token(token)
    except ValueError as exc:
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, str(exc)) from exc
    if payload.get("type") != "access":
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "wrong token type")
    # A non-numeric or missing subject is a bad token, not a server error:
    # previously int() could raise ValueError here and surface as a 500.
    try:
        user_id = int(payload.get("sub"))
    except (TypeError, ValueError):
        user_id = None
    user = db.query(User).filter(User.id == user_id).first() if user_id is not None else None
    if not user or not user.is_active:
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "user not found or inactive")
    return user
def require_role(*roles: str):
    """Dependency factory: admit only users whose role is listed in *roles*.

    Called with no roles, any authenticated user passes.
    """
    def _checker(user: User = Depends(get_current_user)) -> User:
        allowed = not roles or user.role in roles
        if not allowed:
            raise HTTPException(status.HTTP_403_FORBIDDEN, "insufficient permissions")
        return user
    return _checker
+26
View File
@@ -0,0 +1,26 @@
from __future__ import annotations
from fastapi import APIRouter
from .v1 import alerts as alerts_router
from .v1 import auth as auth_router
from .v1 import backups as backups_router
from .v1 import cli as cli_router
from .v1 import controller_backup as controller_backup_router
from .v1 import devices as devices_router
from .v1 import firmware as firmware_router
from .v1 import health as health_router
from .v1 import metrics as metrics_router
from .v1 import settings as settings_router
# Aggregate v1 router; the main application mounts this once under /api/v1.
api_router = APIRouter(prefix="/api/v1")
api_router.include_router(health_router.router, tags=["health"])
api_router.include_router(auth_router.router, prefix="/auth", tags=["auth"])
api_router.include_router(devices_router.router, prefix="/devices", tags=["devices"])
# backups and metrics routers are mounted without an extra prefix:
# their routes declare full paths themselves.
api_router.include_router(backups_router.router, tags=["backups"])
api_router.include_router(firmware_router.router, prefix="/firmware", tags=["firmware"])
api_router.include_router(alerts_router.router, prefix="/alerts", tags=["alerts"])
api_router.include_router(metrics_router.router, tags=["metrics"])
api_router.include_router(cli_router.router, prefix="/cli", tags=["cli"])
api_router.include_router(controller_backup_router.router, prefix="/controller/backup", tags=["controller"])
api_router.include_router(settings_router.router, prefix="/settings", tags=["settings"])
View File
+88
View File
@@ -0,0 +1,88 @@
from __future__ import annotations
from fastapi import APIRouter, Depends, HTTPException, Response, status
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...models.alert import Alert
from ...models.user import User
from ...schemas.alert import AlertOut
from ..deps import get_current_user, require_role
# Module router; mounted under the /alerts prefix by the v1 aggregate router.
router = APIRouter()
@router.get("", response_model=list[AlertOut])
def list_alerts(
    only_unack: bool = False,
    limit: int = 200,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[Alert]:
    """Most recent alerts, optionally restricted to unacknowledged ones."""
    query = db.query(Alert)
    if only_unack:
        query = query.filter(Alert.acknowledged.is_(False))
    return query.order_by(Alert.created_at.desc()).limit(limit).all()
@router.get("/unread-count")
def unread_count(
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> dict[str, int]:
    """Number of alerts that have not been acknowledged yet."""
    unacked = db.query(Alert).filter(Alert.acknowledged.is_(False)).count()
    return {"count": unacked}
@router.post("/{alert_id}/ack", response_model=AlertOut)
def acknowledge(
    alert_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> Alert:
    """Mark one alert as read; 404 when it does not exist."""
    alert = db.get(Alert, alert_id)
    if alert is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "alert not found")
    alert.acknowledged = True
    db.commit()
    db.refresh(alert)
    return alert
@router.post("/ack-all")
def acknowledge_all(
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> dict[str, int]:
    """Mark every unread alert as acknowledged; returns the update count."""
    updated = db.query(Alert).filter(Alert.acknowledged.is_(False)).update({"acknowledged": True})
    db.commit()
    return {"updated": updated}
@router.delete("/{alert_id}", status_code=status.HTTP_204_NO_CONTENT, response_class=Response)
def delete_alert(
    alert_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> Response:
    """Delete one alert (admin only); 404 when it does not exist."""
    alert = db.get(Alert, alert_id)
    if alert is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "alert not found")
    db.delete(alert)
    db.commit()
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.delete("")
def purge_alerts(
    only_acked: bool = False,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> dict:
    """Purge the alert log (admin only).

    Deletes everything by default; with ``only_acked=true`` only
    acknowledged alerts are removed.  Returns the number deleted.
    """
    q = db.query(Alert)
    if only_acked:
        # .is_(True) matches the boolean-filter style used elsewhere in this
        # module (see list_alerts/unread_count) and drops the E712 suppression.
        q = q.filter(Alert.acknowledged.is_(True))
    n = q.delete(synchronize_session=False)
    db.commit()
    return {"deleted": int(n or 0)}
+105
View File
@@ -0,0 +1,105 @@
from __future__ import annotations
from fastapi import APIRouter, Depends, HTTPException, Request, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from ...core.db import get_db
from pydantic import BaseModel
from ...core.security import (
create_access_token,
create_refresh_token,
decode_token,
hash_password,
verify_password,
)
from ...models.user import User
from ...schemas.auth import LoginIn, RefreshIn, TokenPair, UserOut
from ...services.events import add_audit
from ..deps import get_current_user
class ChangePasswordIn(BaseModel):
    """Request body for POST /auth/change-password."""
    # current password, verified before the change is applied
    current: str
    # new plain-text password; hashed before storage
    new: str
# Module router; mounted under the /auth prefix by the v1 aggregate router.
router = APIRouter()
def _issue(user: User) -> TokenPair:
    """Mint a fresh access + refresh token pair for *user*."""
    access = create_access_token(user.id, extra={"role": user.role})
    refresh = create_refresh_token(user.id)
    return TokenPair(access_token=access, refresh_token=refresh)
def _client_ip(req: Request) -> str | None:
fwd = req.headers.get("x-forwarded-for")
if fwd:
return fwd.split(",")[0].strip()
return req.client.host if req.client else None
@router.post("/login", response_model=TokenPair)
def login_json(payload: LoginIn, request: Request, db: Session = Depends(get_db)) -> TokenPair:
    """JSON login: verify credentials, audit the attempt, issue tokens."""
    account = db.query(User).filter(User.email == payload.email).first()
    ip = _client_ip(request)
    bad_creds = not account or not verify_password(payload.password, account.hashed_password)
    if bad_creds:
        add_audit(db, actor=payload.email, action="login.fail", ip=ip, detail="invalid credentials")
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "invalid credentials")
    if not account.is_active:
        add_audit(db, actor=payload.email, action="login.fail", ip=ip, detail="user disabled")
        raise HTTPException(status.HTTP_403_FORBIDDEN, "user disabled")
    add_audit(db, actor=account.email, action="login.success", ip=ip)
    return _issue(account)
@router.post("/login/form", response_model=TokenPair, include_in_schema=False)
def login_form(
    request: Request,
    form: OAuth2PasswordRequestForm = Depends(),
    db: Session = Depends(get_db),
) -> TokenPair:
    """Form-based login for the Swagger "Authorize" button.

    Mirrors /login: 401 for bad credentials, 403 for disabled accounts,
    so both entry points enforce the same policy.
    """
    user = db.query(User).filter(User.email == form.username).first()
    ip = _client_ip(request)
    if not user or not verify_password(form.password, user.hashed_password):
        add_audit(db, actor=form.username, action="login.fail", ip=ip, detail="invalid credentials")
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "invalid credentials")
    if not user.is_active:
        # Parity with login_json: disabled users must not obtain tokens
        # through the Swagger form either (this check was missing here).
        add_audit(db, actor=form.username, action="login.fail", ip=ip, detail="user disabled")
        raise HTTPException(status.HTTP_403_FORBIDDEN, "user disabled")
    add_audit(db, actor=user.email, action="login.success", ip=ip)
    return _issue(user)
@router.post("/refresh", response_model=TokenPair)
def refresh(payload: RefreshIn, db: Session = Depends(get_db)) -> TokenPair:
    """Exchange a valid refresh token for a new token pair (HTTP 401 otherwise)."""
    try:
        data = decode_token(payload.refresh_token)
    except ValueError as exc:
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, str(exc)) from exc
    if data.get("type") != "refresh":
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "wrong token type")
    # A token with a missing or non-numeric subject is malformed — reject it
    # as 401 instead of letting KeyError/ValueError surface as a 500.
    try:
        user_id = int(data["sub"])
    except (KeyError, TypeError, ValueError) as exc:
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "invalid token subject") from exc
    user = db.query(User).filter(User.id == user_id).first()
    if not user or not user.is_active:
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "user not found")
    return _issue(user)
@router.get("/me", response_model=UserOut)
def me(user: User = Depends(get_current_user)) -> User:
    """Return the authenticated caller's own profile."""
    return user
@router.post("/change-password")
def change_password(
    payload: ChangePasswordIn,
    user: User = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> dict[str, bool]:
    """Change the caller's own password after verifying the current one."""
    current_ok = verify_password(payload.current, user.hashed_password)
    if not current_ok:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "Текущий пароль неверный")
    if len(payload.new) < 4:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "Новый пароль слишком короткий")
    user.hashed_password = hash_password(payload.new)
    db.commit()
    return {"ok": True}
+160
View File
@@ -0,0 +1,160 @@
from __future__ import annotations
from fastapi import APIRouter, Depends, HTTPException, Response, status
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...core.security import decrypt_secret
from ...models.backup import DeviceBackup
from ...models.device import Device
from ...models.user import User
from ...schemas.backup import BackupOut
from ...services.routeros.backup import create_and_download_backup
from ...services.routeros.client import RouterOSCredentials, RouterOSError
from ...services.backup_ftp_server import detect_push_host
from ...core.config import get_settings
from ..deps import get_current_user, require_role
# Module router; routes here declare their full paths (no mount prefix).
router = APIRouter()
# Retention limit: distinct backup pairs kept per device (see _rotate).
MAX_BACKUPS_PER_DEVICE = 10
def _creds(d: Device) -> RouterOSCredentials:
    """Connection credentials for *d*, with the stored password decrypted."""
    plain_password = decrypt_secret(d.password_enc)
    return RouterOSCredentials(
        host=d.host,
        port=d.port,
        username=d.username,
        password=plain_password,
        use_tls=d.use_tls,
        timeout=15.0,
    )
def _rotate(db: Session, device_id: int) -> None:
    """Trim a device's backup history to MAX_BACKUPS_PER_DEVICE pairs.

    A pair is the .backup and .rsc files sharing one base name; the newest
    distinct base names are kept and every older row is deleted.
    """
    rows = (
        db.query(DeviceBackup)
        .filter(DeviceBackup.device_id == device_id)
        .order_by(DeviceBackup.created_at.desc())
        .all()
    )
    kept_bases: set[str] = set()
    for row in rows:
        base = row.filename.rsplit(".", 1)[0]
        if base not in kept_bases and len(kept_bases) >= MAX_BACKUPS_PER_DEVICE:
            db.delete(row)
        else:
            kept_bases.add(base)
    db.commit()
@router.get("/devices/{device_id}/backups", response_model=list[BackupOut])
def list_backups(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[DeviceBackup]:
    """Backup history of one device, newest first; 404 for unknown devices."""
    if db.get(Device, device_id) is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    query = db.query(DeviceBackup).filter(DeviceBackup.device_id == device_id)
    return query.order_by(DeviceBackup.created_at.desc()).all()
@router.post(
    "/devices/{device_id}/backups",
    response_model=list[BackupOut],
    status_code=status.HTTP_201_CREATED,
)
def create_backup(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> list[DeviceBackup]:
    """Create a backup (binary + text), download it, and store it in the DB.

    Returns the device's full backup list (newest first) after rotation.
    """
    d = db.get(Device, device_id)
    if not d:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    from datetime import datetime, timezone
    import re
    # router id = the device's identity (its name inside RouterOS),
    # falling back to the DB name, then host. Sanitized to [A-Za-z0-9_-].
    raw_id = (d.identity or d.name or d.host or "device").strip()
    safe_id = re.sub(r"[^A-Za-z0-9_-]+", "_", raw_id).strip("_") or "device"
    ts = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    base = f"{safe_id}-{ts}"
    cfg = get_settings()
    # Address the device pushes the files back to (explicit config wins,
    # otherwise auto-detected).
    push_host = cfg.backup_push_host or detect_push_host()
    push_port = cfg.backup_ftp_port
    try:
        files = create_and_download_backup(
            _creds(d), base, push_host=push_host, push_port=push_port,
        )
    except RouterOSError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
    # Persist both artifacts of the pair before rotating old history.
    rec_bin = DeviceBackup(
        device_id=d.id, filename=files.binary_name, fmt="binary",
        size=len(files.binary_data), content=files.binary_data,
    )
    rec_txt = DeviceBackup(
        device_id=d.id, filename=files.text_name, fmt="text",
        size=len(files.text_data), content=files.text_data,
    )
    db.add(rec_bin)
    db.add(rec_txt)
    db.commit()
    db.refresh(rec_bin)
    db.refresh(rec_txt)
    _rotate(db, d.id)
    return (
        db.query(DeviceBackup)
        .filter(DeviceBackup.device_id == device_id)
        .order_by(DeviceBackup.created_at.desc())
        .all()
    )
@router.get("/backups/{backup_id}/download")
def download_backup(
    backup_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> Response:
    """Stream a stored backup as a file-download response."""
    rec = db.get(DeviceBackup, backup_id)
    if rec is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "backup not found")
    if rec.fmt == "binary":
        media_type = "application/octet-stream"
    else:
        media_type = "text/plain; charset=utf-8"
    disposition = f'attachment; filename="{rec.filename}"'
    return Response(
        content=rec.content,
        media_type=media_type,
        headers={"Content-Disposition": disposition},
    )
@router.delete("/backups/{backup_id}", status_code=status.HTTP_204_NO_CONTENT, response_class=Response)
def delete_backup(
    backup_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> Response:
    """Delete one stored backup (admin only); 404 when it does not exist."""
    rec = db.get(DeviceBackup, backup_id)
    if rec is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "backup not found")
    db.delete(rec)
    db.commit()
    return Response(status_code=status.HTTP_204_NO_CONTENT)
+105
View File
@@ -0,0 +1,105 @@
from __future__ import annotations
from typing import Any
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...core.security import decrypt_secret
from ...models.device import Device
from ...models.user import User
from ...services.events import add_audit
from ...services.routeros.client import (
RouterOSCredentials,
RouterOSError,
execute_cli,
)
from ..deps import require_role
# Module router; mounted under the /cli prefix by the v1 aggregate router.
router = APIRouter()
# Dangerous commands require explicit confirmation (confirm=true in the body).
DANGEROUS_PREFIXES = (
    "/system/reboot",
    "/system/shutdown",
    "/system/reset-configuration",
    "/system/routerboard/upgrade",
    "/file/remove",
)
class CLIRunIn(BaseModel):
    """Request body for POST /cli/run."""
    # target device ids; must be non-empty
    device_ids: list[int] = Field(default_factory=list)
    # RouterOS command path to execute on every device
    command: str
    # must be True for commands matching DANGEROUS_PREFIXES
    confirm: bool = False
class CLIDeviceResult(BaseModel):
    """Outcome of running the command on a single device."""
    device_id: int
    # device identity (preferred) or stored name; None if the device is unknown
    device_name: str | None = None
    # True when the command executed without a RouterOS error
    ok: bool
    # rows returned by the device on success
    rows: list[dict[str, Any]] | None = None
    # error text on failure
    error: str | None = None
class CLIRunOut(BaseModel):
    """Response body for POST /cli/run: the command and per-device results."""
    command: str
    results: list[CLIDeviceResult]
@router.post("/run", response_model=CLIRunOut)
def run_cli(
    payload: CLIRunIn,
    db: Session = Depends(get_db),
    user: User = Depends(require_role("admin", "operator")),
) -> CLIRunOut:
    """Execute one RouterOS command on a batch of devices.

    Rejects empty device lists/commands (400) and unconfirmed dangerous
    commands (409).  A failure on one device does not stop the batch; each
    attempt is audit-logged.
    """
    if not payload.device_ids:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "device_ids is empty")
    cmd = payload.command.strip()
    if not cmd:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "command is empty")
    # Plain prefix match against the dangerous-command list.
    is_dangerous = any(cmd.startswith(p) for p in DANGEROUS_PREFIXES)
    if is_dangerous and not payload.confirm:
        raise HTTPException(
            status.HTTP_409_CONFLICT,
            "dangerous command requires confirmation (set confirm=true)",
        )
    results: list[CLIDeviceResult] = []
    for did in payload.device_ids:
        d = db.get(Device, did)
        if not d:
            results.append(CLIDeviceResult(device_id=did, ok=False, error="device not found"))
            continue
        try:
            rows = execute_cli(
                RouterOSCredentials(
                    host=d.host,
                    username=d.username,
                    password=decrypt_secret(d.password_enc),
                    port=d.port,
                    use_tls=d.use_tls,
                    timeout=10.0,
                ),
                cmd,
            )
            results.append(
                CLIDeviceResult(device_id=did, device_name=d.identity or d.name, ok=True, rows=rows)
            )
        except RouterOSError as exc:
            results.append(
                CLIDeviceResult(device_id=did, device_name=d.identity or d.name, ok=False, error=str(exc))
            )
        # NOTE(review): audit appears to run for every attempt (success or
        # failure); command text is truncated to 200 chars for the log.
        add_audit(
            db,
            actor=user.email,
            action="cli.run",
            target=f"device:{did}",
            detail=cmd[:200],
        )
    return CLIRunOut(command=cmd, results=results)
+57
View File
@@ -0,0 +1,57 @@
from __future__ import annotations
from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFile, status
from ...models.user import User
from ...services.controller_backup import (
make_config_only_archive,
make_full_archive,
restore_full_archive,
)
from ..deps import require_role
# Module router; mounted under /controller/backup by the v1 aggregate router.
router = APIRouter()
@router.get("/config")
def download_config_backup(
    _: User = Depends(require_role("admin")),
) -> Response:
    """Download a config-only archive of the controller (admin only)."""
    name, data = make_config_only_archive()
    disposition = f'attachment; filename="{name}"'
    return Response(
        content=data,
        media_type="application/gzip",
        headers={"Content-Disposition": disposition},
    )
@router.get("/full")
def download_full_backup(
    _: User = Depends(require_role("admin")),
) -> Response:
    """Download a full controller archive (admin only); 500 on build failure."""
    try:
        name, data = make_full_archive()
    except RuntimeError as exc:
        raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, str(exc)) from exc
    disposition = f'attachment; filename="{name}"'
    return Response(
        content=data,
        media_type="application/gzip",
        headers={"Content-Disposition": disposition},
    )
@router.post("/restore")
async def restore_backup(
    file: UploadFile = File(...),
    _: User = Depends(require_role("admin")),
) -> dict:
    """Restore a full backup (tar.gz containing db.dump).

    Destructive: the current database is dropped during the restore.
    """
    bad_name = not file.filename or not file.filename.endswith((".tar.gz", ".tgz"))
    if bad_name:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "Ожидается файл .tar.gz")
    archive = await file.read()
    if len(archive) > 500 * 1024 * 1024:
        raise HTTPException(status.HTTP_413_REQUEST_ENTITY_TOO_LARGE, "Архив слишком большой (>500 MiB)")
    try:
        return restore_full_archive(archive)
    except RuntimeError as exc:
        raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, str(exc)) from exc
+494
View File
@@ -0,0 +1,494 @@
from __future__ import annotations
from datetime import datetime, timezone
from fastapi import APIRouter, Depends, HTTPException, Response, status
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...core.security import decrypt_secret, encrypt_secret
from ...models.device import Device
from ...models.metric import DeviceMetric
from ...models.user import User
from ...schemas.device import (
DeviceCreate,
DeviceOut,
DeviceResource,
DeviceUpdate,
)
from ...services.events import add_alert, add_audit
from ...services.routeros.client import (
RouterOSCredentials,
RouterOSError,
check_internet,
cmd_reboot,
cmd_safe_mode,
cmd_upgrade_check,
cmd_upgrade_install,
fetch_dhcp_leases,
fetch_identity,
fetch_interface_stats,
fetch_resource,
parse_uptime,
push_firmware_via_ftp,
)
from ..deps import get_current_user, require_role
# Module router; mounted under the /devices prefix by the v1 aggregate router.
router = APIRouter()
def _creds(d: Device) -> RouterOSCredentials:
    """API credentials for device *d*; decrypts the stored password."""
    secret = decrypt_secret(d.password_enc)
    return RouterOSCredentials(
        host=d.host,
        port=d.port,
        username=d.username,
        password=secret,
        use_tls=d.use_tls,
    )
@router.get("", response_model=list[DeviceOut])
def list_devices(
    kind: str | None = None,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[Device]:
    """All devices, newest first, optionally filtered by kind."""
    query = db.query(Device)
    if kind:
        query = query.filter(Device.kind == kind)
    return query.order_by(Device.id.desc()).all()
@router.post("", response_model=DeviceOut, status_code=status.HTTP_201_CREATED)
def create_device(
    payload: DeviceCreate,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> Device:
    """Register a new device; the password is stored encrypted."""
    device = Device(
        name=payload.name,
        host=payload.host,
        port=payload.port,
        use_tls=payload.use_tls,
        username=payload.username,
        password_enc=encrypt_secret(payload.password),
        kind=payload.kind or "router",
    )
    db.add(device)
    db.commit()
    db.refresh(device)
    return device
@router.get("/{device_id}", response_model=DeviceOut)
def get_device(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> Device:
    """Fetch one device by id; 404 when absent."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    return device
@router.patch("/{device_id}", response_model=DeviceOut)
def update_device(
    device_id: int,
    payload: DeviceUpdate,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> Device:
    """Partially update a device; a supplied password is re-encrypted."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    changes = payload.model_dump(exclude_unset=True)
    if "password" in changes:
        device.password_enc = encrypt_secret(changes.pop("password"))
    for field, value in changes.items():
        setattr(device, field, value)
    db.commit()
    db.refresh(device)
    return device
@router.delete("/{device_id}", status_code=status.HTTP_204_NO_CONTENT, response_class=Response)
def delete_device(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> Response:
    """Delete one device (admin only); 404 when absent."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    db.delete(device)
    db.commit()
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post("/{device_id}/probe", response_model=DeviceResource)
def probe_device(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> DeviceResource:
    """Connect to the device, read `/system/resource` and refresh the stored
    metadata (identity, model, serial, version, status).

    Also records a metric sample, detects abnormal reboots (uptime drop),
    runs a best-effort internet check, and raises alerts on state changes.
    """
    d = db.get(Device, device_id)
    if not d:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    try:
        res = fetch_resource(_creds(d))
        identity = fetch_identity(_creds(d))
    except RouterOSError as exc:
        # Probe failed: mark the device down and persist the error before 502.
        d.status = "down"
        d.last_error = str(exc)
        db.commit()
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
    # Refresh metadata, keeping previous values when the probe returns nothing.
    d.identity = identity or d.identity
    d.model = res.get("board-name") or d.model
    d.ros_version = res.get("version") or d.ros_version
    d.architecture = res.get("architecture-name") or d.architecture
    prev_status = d.status
    d.status = "up"
    d.last_error = None
    d.last_seen = datetime.now(timezone.utc)
    def _to_int(v):
        # Tolerate missing or non-numeric values from the device.
        try:
            return int(v) if v is not None else None
        except (TypeError, ValueError):
            return None
    cpu = _to_int(res.get("cpu-load"))
    free_mem = _to_int(res.get("free-memory"))
    total_mem = _to_int(res.get("total-memory"))
    uptime_s = parse_uptime(res.get("uptime"))
    # Abnormal reboot detection: new uptime below the previous one by > 60s.
    abnormal = False
    if uptime_s is not None and d.last_uptime_seconds is not None:
        if uptime_s < d.last_uptime_seconds - 60:
            abnormal = True
            d.abnormal_reboot = True
            add_alert(
                db,
                severity="warning",
                category="abnormal_reboot",
                source=f"device:{d.id}",
                title=f"Возможен аварийный перезапуск: {d.identity or d.name}",
                message=(
                    f"Uptime упал с {d.last_uptime_seconds}s до {uptime_s}s "
                    f"без штатной команды reboot."
                ),
            )
    if not abnormal:
        d.abnormal_reboot = False
    d.last_uptime_seconds = uptime_s
    # Internet check is best-effort: any failure leaves internet_ok unknown.
    try:
        ok = check_internet(_creds(d))
        d.internet_ok = ok
        if not ok:
            add_alert(
                db,
                severity="warning",
                category="internet",
                source=f"device:{d.id}",
                title=f"Нет интернета на {d.identity or d.name}",
                message="Ping 8.8.8.8 не прошёл.",
            )
    except Exception:
        d.internet_ok = None
    # Notify when a previously-down device comes back online.
    if prev_status == "down" and d.status == "up":
        add_alert(
            db,
            severity="info",
            category="device",
            source=f"device:{d.id}",
            title=f"Устройство снова онлайн: {d.identity or d.name}",
        )
    mem_used_pct = None
    if free_mem is not None and total_mem and total_mem > 0:
        mem_used_pct = round(100 - (free_mem / total_mem) * 100, 1)
    # Persist a metric sample alongside the refreshed device row.
    metric = DeviceMetric(
        device_id=d.id,
        cpu_load=float(cpu) if cpu is not None else None,
        mem_used_pct=mem_used_pct,
        free_memory=free_mem,
        total_memory=total_mem,
        uptime_seconds=uptime_s,
        internet_ok=d.internet_ok,
    )
    db.add(metric)
    db.commit()
    return DeviceResource(
        cpu_load=cpu,
        free_memory=free_mem,
        total_memory=total_mem,
        uptime=res.get("uptime"),
        version=res.get("version"),
        board_name=res.get("board-name"),
        architecture_name=res.get("architecture-name"),
    )
@router.post("/{device_id}/reboot", status_code=status.HTTP_204_NO_CONTENT, response_class=Response)
def reboot_device(
    device_id: int,
    db: Session = Depends(get_db),
    user: User = Depends(require_role("admin", "operator")),
) -> Response:
    """Send a reboot command to the device; records audit and alert entries."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    try:
        cmd_reboot(_creds(device))
    except RouterOSError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
    add_audit(db, actor=user.email, action="device.reboot", target=f"device:{device_id}")
    add_alert(db, severity="info", category="device", source=f"device:{device_id}",
              title=f"Reboot отправлен: {device.identity or device.name}", message=f"by {user.email}")
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post("/{device_id}/safe-mode", status_code=status.HTTP_204_NO_CONTENT, response_class=Response)
def toggle_safe_mode(
    device_id: int,
    db: Session = Depends(get_db),
    user: User = Depends(require_role("admin", "operator")),
) -> Response:
    """Toggle safe mode on the device; the action is audit-logged."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    try:
        cmd_safe_mode(_creds(device))
    except RouterOSError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
    add_audit(db, actor=user.email, action="device.safe_mode", target=f"device:{device_id}")
    return Response(status_code=status.HTTP_204_NO_CONTENT)
# ---------- Sprint 09: интерфейсы / DHCP / upgrade ----------
@router.get("/{device_id}/interfaces")
def list_interfaces(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[dict]:
    """Device interfaces with rx/tx counters and the running flag."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    try:
        return fetch_interface_stats(_creds(device))
    except RouterOSError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
@router.get("/{device_id}/interface-traffic")
def interface_traffic(
    device_id: int,
    names: str | None = None,
    hours: float = 24.0,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> dict:
    """Per-interface bps series over a `hours` window.

    `names` is CSV; when empty it falls back to `device.monitored_interfaces`.
    Returns {"series": {name: [{ts, rx_bps, tx_bps, running}]}}.
    """
    from ...models.interface_stat import InterfaceStat
    from datetime import datetime, timedelta, timezone
    d = db.get(Device, device_id)
    if not d:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    if not names:
        names = d.monitored_interfaces or ""
    name_list = [x.strip() for x in names.split(",") if x.strip()]
    if not name_list:
        return {"series": {}, "hours": hours}
    since = datetime.now(timezone.utc) - timedelta(hours=hours)
    # Samples ordered by (name, ts) so per-interface deltas see consecutive rows.
    rows = (
        db.query(InterfaceStat)
        .filter(
            InterfaceStat.device_id == device_id,
            InterfaceStat.name.in_(name_list),
            InterfaceStat.ts >= since,
        )
        .order_by(InterfaceStat.name.asc(), InterfaceStat.ts.asc())
        .all()
    )
    by_name: dict[str, list] = {n: [] for n in name_list}
    # Previous (ts, rx_bytes, tx_bytes) per interface, for rate deltas.
    last: dict[str, tuple] = {}
    for r in rows:
        prev = last.get(r.name)
        rx_bps = tx_bps = None
        if prev is not None:
            dt = (r.ts - prev[0]).total_seconds()
            if dt > 0:
                # counters may reset after a reboot — ignore negative deltas
                drx = r.rx_bytes - prev[1]
                dtx = r.tx_bytes - prev[2]
                if drx >= 0 and dtx >= 0:
                    rx_bps = round(drx * 8 / dt)
                    tx_bps = round(dtx * 8 / dt)
        by_name[r.name].append({
            "ts": r.ts.isoformat(),
            "rx_bps": rx_bps,
            "tx_bps": tx_bps,
            "running": r.running,
        })
        last[r.name] = (r.ts, r.rx_bytes, r.tx_bytes)
    return {"series": by_name, "hours": hours}
@router.get("/{device_id}/uplink-status")
def uplink_status(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[dict]:
    """Current running state of the configured uplinks, from the latest sample."""
    from ...models.interface_stat import InterfaceStat
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    names = [part.strip() for part in (device.uplink_interfaces or "").split(",") if part.strip()]
    statuses: list[dict] = []
    for name in names:
        latest = (
            db.query(InterfaceStat)
            .filter(InterfaceStat.device_id == device_id, InterfaceStat.name == name)
            .order_by(InterfaceStat.ts.desc()).first()
        )
        statuses.append({
            "name": name,
            "running": bool(latest.running) if latest else None,
            "ts": latest.ts.isoformat() if latest else None,
        })
    return statuses
@router.get("/{device_id}/dhcp-leases")
def dhcp_leases(
    device_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[dict]:
    """Leases handed out by every DHCP server on the device."""
    device = db.get(Device, device_id)
    if device is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    try:
        return fetch_dhcp_leases(_creds(device))
    except RouterOSError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
@router.post("/{device_id}/upgrade/internet")
def upgrade_from_internet(
    device_id: int,
    channel: str = "stable",
    install: bool = False,
    db: Session = Depends(get_db),
    user: User = Depends(require_role("admin", "operator")),
) -> dict:
    """Ask the MikroTik to check for an update and, with `install=true`, install it.

    Uses the stock `/system/package/update` mechanism (MikroTik repository).
    Installing reboots the device.
    """
    d = db.get(Device, device_id)
    if not d:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    try:
        info = cmd_upgrade_check(_creds(d), channel=channel)
        if install:
            cmd_upgrade_install(_creds(d))
            # NOTE(review): audit/alert appear tied to the install path
            # (alert title says the upgrade was launched) — confirm intent.
            add_audit(db, actor=user.email, action="device.upgrade.internet",
                      target=f"device:{device_id}", detail=f"channel={channel}")
            add_alert(db, severity="info", category="firmware",
                      source=f"device:{device_id}",
                      title=f"Обновление из интернета запущено: {d.identity or d.name}",
                      message=f"by {user.email}, channel={channel}")
            db.commit()
        return {"ok": True, "info": info, "installed": bool(install)}
    except RouterOSError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
@router.post("/{device_id}/upgrade/local")
def upgrade_from_local(
    device_id: int,
    firmware_id: int,
    reboot: bool = True,
    db: Session = Depends(get_db),
    user: User = Depends(require_role("admin", "operator")),
) -> dict:
    """Install a firmware image from the controller's local repository.

    The image is published temporarily through the embedded FTP server; the
    device downloads it itself via `/tool/fetch`, then (optionally) reboots —
    RouterOS installs the .npk at boot.
    """
    from ...models.firmware import Firmware
    from ...services.backup_ftp_server import get_server, detect_push_host
    from ...core.config import get_settings as _cfg
    import os
    d = db.get(Device, device_id)
    if not d:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    fw = db.get(Firmware, firmware_id)
    if not fw:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "firmware not found")
    if not fw.content:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "firmware has no payload")
    srv = get_server()
    if srv is None:
        raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE, "backup ftp server not running")
    cfg = _cfg()
    # Address the device must reach the controller on (explicit config wins).
    push_host = cfg.backup_push_host or detect_push_host()
    if not push_host:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "BACKUP_PUSH_HOST not configured")
    # One-shot FTP session exposing exactly this firmware file.
    sess = srv.open_session([fw.name])
    try:
        path = os.path.join(sess.home_dir, fw.name)
        with open(path, "wb") as f:
            f.write(fw.content)
        try:
            push_firmware_via_ftp(
                _creds(d),
                server=push_host, port=int(cfg.backup_ftp_port),
                user=sess.username, password=sess.password,
                src_path=fw.name, dst_filename=fw.name,
            )
        except RouterOSError as exc:
            raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
        if reboot:
            try:
                cmd_reboot(_creds(d))
            except RouterOSError as exc:
                raise HTTPException(status.HTTP_502_BAD_GATEWAY, str(exc)) from exc
        add_audit(db, actor=user.email, action="device.upgrade.local",
                  target=f"device:{device_id}", detail=f"firmware={fw.name}")
        add_alert(db, severity="info", category="firmware",
                  source=f"device:{device_id}",
                  title=f"Установлена локальная прошивка: {d.identity or d.name}",
                  message=f"{fw.name} by {user.email}")
        db.commit()
        return {"ok": True, "file": fw.name, "reboot": reboot}
    finally:
        # Always tear down the temporary FTP session, even on failure.
        srv.close_session(sess.session_id)
+359
View File
@@ -0,0 +1,359 @@
from __future__ import annotations
import hashlib
import os.path
import re
import httpx
from fastapi import APIRouter, Depends, File, Form, HTTPException, Response, UploadFile, status
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...models.firmware import Firmware
from ...models.user import User
from ...schemas.firmware import (
FirmwareBulkImportIn,
FirmwareBulkOut,
FirmwareBulkResult,
FirmwareImportIn,
FirmwareOut,
FirmwareUpdateIn,
)
from ...services.firmware_check import CHANNELS, check_and_alert, get_state
from ..deps import get_current_user, require_role
# Shared router for all /firmware endpoints.
router = APIRouter()

# Hard cap for a single firmware payload (download or upload): 200 MiB.
MAX_FIRMWARE_SIZE = 200 * 1024 * 1024

# Known RouterOS v7 CPU architectures, offered to the UI for bulk import.
KNOWN_ARCHITECTURES = [
    "arm64", "arm", "mipsbe", "mmips", "mipsle", "smips",
    "tile", "ppc", "x86", "x86_64",
]
@router.get("", response_model=list[FirmwareOut])
def list_firmware(
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[Firmware]:
    """Return every stored firmware record, most recent first."""
    query = db.query(Firmware).order_by(Firmware.created_at.desc())
    return query.all()
@router.post("/check")
def manual_check(
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> dict:
    """Manually check every channel for new RouterOS releases.

    Raises 502 when the upstream version feed cannot be reached.
    """
    state = check_and_alert(db)
    if not state:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, "upstream check failed")
    # Legacy UI compatibility: surface the stable channel at the top level.
    stable_info = state.get("stable") or {}
    return {
        "latest_version": stable_info.get("version", ""),
        "released_at": stable_info.get("released_at", ""),
        "channels": state,
    }
@router.get("/channels")
def list_channels(
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> dict:
    """Current release state per channel plus the known architecture list."""
    payload = {
        "channels": get_state(db),
        "available_channels": [name for name in CHANNELS],
        "architectures": KNOWN_ARCHITECTURES,
    }
    return payload
@router.post("/import", response_model=FirmwareOut, status_code=status.HTTP_201_CREATED)
def import_firmware(
    payload: FirmwareImportIn,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> Firmware:
    """Download a firmware image from a URL into the internal repository.

    Deduplicated three ways: by `source_url`, by (`version`, `architecture`)
    when both are supplied, and by sha256 of the downloaded payload. On a
    duplicate the existing record is returned without re-downloading (the
    route still responds 201 in that case, since the status code is fixed
    on the decorator).

    Raises 502 when the download fails and 413 when the payload exceeds
    MAX_FIRMWARE_SIZE.
    """
    url = str(payload.url)
    # 1) Dedup by source URL.
    existing = db.query(Firmware).filter(Firmware.source_url == url).first()
    if existing:
        return existing
    # 2) Dedup by (version, architecture) when both fields were provided.
    if payload.version and payload.architecture:
        existing = (
            db.query(Firmware)
            .filter(
                Firmware.version == payload.version,
                Firmware.architecture == payload.architecture,
            )
            .first()
        )
        if existing:
            return existing
    try:
        # Reuse the shared streaming helper (same one bulk import uses) so
        # the size cap and timeout are enforced in one place; it raises 413
        # via HTTPException when MAX_FIRMWARE_SIZE is exceeded.
        data = _download_firmware_url(url)
    except httpx.HTTPError as exc:
        raise HTTPException(status.HTTP_502_BAD_GATEWAY, f"download failed: {exc}") from exc
    name = payload.name or os.path.basename(url.split("?")[0]) or "firmware.bin"
    sha = hashlib.sha256(data).hexdigest()
    # 3) Dedup by sha256 (the same payload may be reachable via several URLs).
    existing = db.query(Firmware).filter(Firmware.sha256 == sha).first()
    if existing:
        return existing
    rec = Firmware(
        name=name,
        version=payload.version,
        architecture=payload.architecture,
        channel=payload.channel,
        size=len(data),
        sha256=sha,
        source_url=url,
        content=data,
    )
    db.add(rec)
    db.commit()
    db.refresh(rec)
    return rec
def _download_firmware_url(url: str) -> bytes:
    """Stream *url* into memory, enforcing MAX_FIRMWARE_SIZE (413 on excess)."""
    buf = bytearray()
    with httpx.stream("GET", url, follow_redirects=True, timeout=180.0) as resp:
        resp.raise_for_status()
        for piece in resp.iter_bytes(chunk_size=64 * 1024):
            buf.extend(piece)
            if len(buf) > MAX_FIRMWARE_SIZE:
                raise HTTPException(
                    status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
                    f"firmware exceeds {MAX_FIRMWARE_SIZE} bytes",
                )
    return bytes(buf)
@router.post("/import-bulk", response_model=FirmwareBulkOut)
def import_bulk(
    payload: FirmwareBulkImportIn,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> FirmwareBulkOut:
    """Fetch the .npk of one version for several architectures in one call."""
    outcomes: list[FirmwareBulkResult] = []
    base = "https://download.mikrotik.com/routeros"
    for arch in payload.architectures:
        url = f"{base}/{payload.version}/routeros-{payload.version}-{arch}.npk"
        # Pre-download dedup: match on source URL or (version, architecture).
        dup = (
            db.query(Firmware)
            .filter(
                (Firmware.source_url == url)
                | ((Firmware.version == payload.version) & (Firmware.architecture == arch))
            )
            .first()
        )
        if dup:
            outcomes.append(FirmwareBulkResult(
                architecture=arch, ok=True, firmware_id=dup.id, skipped=True,
            ))
            continue
        try:
            data = _download_firmware_url(url)
            digest = hashlib.sha256(data).hexdigest()
            # Content-level dedup (identical payload under a different URL).
            dup = db.query(Firmware).filter(Firmware.sha256 == digest).first()
            if dup:
                outcomes.append(FirmwareBulkResult(
                    architecture=arch, ok=True, firmware_id=dup.id, skipped=True,
                ))
                continue
            rec = Firmware(
                name=os.path.basename(url),
                version=payload.version,
                architecture=arch,
                channel=payload.channel,
                size=len(data),
                sha256=digest,
                source_url=url,
                content=data,
            )
            db.add(rec)
            db.commit()
            db.refresh(rec)
            outcomes.append(FirmwareBulkResult(architecture=arch, ok=True, firmware_id=rec.id))
        except HTTPException as exc:
            outcomes.append(FirmwareBulkResult(architecture=arch, ok=False, error=str(exc.detail)))
        except httpx.HTTPError as exc:
            outcomes.append(FirmwareBulkResult(architecture=arch, ok=False, error=str(exc)))
    return FirmwareBulkOut(version=payload.version, channel=payload.channel, results=outcomes)
# routeros-7.16.1-arm64.npk / routeros-7.16.1-arm-7.16.1.npk и т.п.
_FW_NAME_RE = re.compile(
r"^routeros-(?P<version>\d+(?:\.\d+){1,2}(?:[a-z0-9.\-]*)?)-(?P<arch>[a-z0-9_]+)\.npk$",
re.IGNORECASE,
)
def _guess_meta(filename: str) -> tuple[str | None, str | None]:
"""Из имени файла вытащить (version, architecture). Возвращает (None, None) если не разобрали."""
m = _FW_NAME_RE.match(filename.strip().lower())
if not m:
return None, None
return m.group("version"), m.group("arch")
@router.post("/upload", response_model=FirmwareOut, status_code=status.HTTP_201_CREATED)
async def upload_firmware(
    file: UploadFile = File(..., description=".npk файл прошивки RouterOS"),
    name: str | None = Form(None),
    version: str | None = Form(None),
    architecture: str | None = Form(None),
    channel: str | None = Form(None),
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> Firmware:
    """Manual firmware upload from the operator's disk (multipart/form-data).

    When `version`/`architecture` are omitted they are guessed from the file
    name (`routeros-<version>-<arch>.npk`). Deduplicated by sha256 and by
    (version, architecture). Raises 413 when the body exceeds
    MAX_FIRMWARE_SIZE and 400 on an empty file.
    """
    fname = (name or file.filename or "firmware.bin").strip()
    # Non-.npk names are accepted on purpose: RouterOS only installs .npk,
    # but the admin decides what to store here. (A previous no-op
    # `if not fname.endswith(".npk"): pass` branch was removed as dead code.)

    # Read the body in chunks, enforcing the size cap as we go.
    chunks: list[bytes] = []
    total = 0
    while True:
        chunk = await file.read(64 * 1024)
        if not chunk:
            break
        total += len(chunk)
        if total > MAX_FIRMWARE_SIZE:
            raise HTTPException(
                status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
                f"firmware exceeds {MAX_FIRMWARE_SIZE} bytes",
            )
        chunks.append(chunk)
    data = b"".join(chunks)
    if not data:
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "empty file")
    # Fill in missing metadata from the file name.
    if not version or not architecture:
        guessed_ver, guessed_arch = _guess_meta(fname)
        version = version or guessed_ver
        architecture = architecture or guessed_arch
    sha = hashlib.sha256(data).hexdigest()
    # Dedup by content hash: return the existing record unchanged.
    existing = db.query(Firmware).filter(Firmware.sha256 == sha).first()
    if existing:
        return existing
    # Dedup by (version, architecture).
    if version and architecture:
        existing = (
            db.query(Firmware)
            .filter(
                Firmware.version == version,
                Firmware.architecture == architecture,
            )
            .first()
        )
        if existing:
            return existing
    rec = Firmware(
        name=fname,
        version=version,
        architecture=architecture,
        channel=channel,
        size=len(data),
        sha256=sha,
        source_url=None,  # manual upload has no origin URL
        content=data,
    )
    db.add(rec)
    db.commit()
    db.refresh(rec)
    return rec
@router.patch("/{firmware_id}", response_model=FirmwareOut)
def update_firmware(
    firmware_id: int,
    payload: FirmwareUpdateIn,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin", "operator")),
) -> Firmware:
    """Patch mutable metadata of a firmware record (404 when absent)."""
    rec = db.get(Firmware, firmware_id)
    if rec is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "firmware not found")
    changes = payload.model_dump(exclude_unset=True)
    for field, value in changes.items():
        setattr(rec, field, value)
    db.commit()
    db.refresh(rec)
    return rec
@router.get("/{firmware_id}/download")
def download_firmware(
    firmware_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> Response:
    """Send the stored firmware payload back as a file attachment."""
    rec = db.get(Firmware, firmware_id)
    if rec is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "firmware not found")
    disposition = f'attachment; filename="{rec.name}"'
    return Response(
        content=rec.content,
        media_type="application/octet-stream",
        headers={"Content-Disposition": disposition},
    )
@router.delete("/{firmware_id}", status_code=status.HTTP_204_NO_CONTENT, response_class=Response)
def delete_firmware(
    firmware_id: int,
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> Response:
    """Permanently delete a firmware record (admin only; 404 when absent)."""
    rec = db.get(Firmware, firmware_id)
    if rec is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "firmware not found")
    db.delete(rec)
    db.commit()
    return Response(status_code=status.HTTP_204_NO_CONTENT)
+16
View File
@@ -0,0 +1,16 @@
from fastapi import APIRouter
# Service identity reported by the /version endpoint below.
APP_NAME = "ROSzetta"
APP_VERSION = "0.6.0"

router = APIRouter()
@router.get("/health")
def health() -> dict[str, str]:
    """Liveness probe: always reports the service as healthy."""
    payload = {"status": "ok"}
    return payload
@router.get("/version")
def version() -> dict[str, str]:
    """Report the service name and release version."""
    info = {"name": APP_NAME, "version": APP_VERSION}
    return info
+117
View File
@@ -0,0 +1,117 @@
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...models.device import Device
from ...models.metric import DeviceMetric
from ...models.user import User
from ...schemas.metric import MetricPoint
from ..deps import get_current_user
# Router exposing per-device metric history and the fleet heartbeat summary.
router = APIRouter()
@router.get("/devices/{device_id}/metrics", response_model=list[MetricPoint])
def get_metrics(
    device_id: int,
    hours: int = 24,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> list[MetricPoint]:
    """Return metric samples for one device, oldest first.

    `hours` is clamped to [1, 720] (30 days). 404 for an unknown device.
    """
    if not db.get(Device, device_id):
        raise HTTPException(status.HTTP_404_NOT_FOUND, "device not found")
    window_hours = max(1, min(hours, 24 * 30))
    since = datetime.now(timezone.utc) - timedelta(hours=window_hours)
    samples = (
        db.query(DeviceMetric)
        .filter(DeviceMetric.device_id == device_id, DeviceMetric.created_at >= since)
        .order_by(DeviceMetric.created_at.asc())
        .all()
    )
    points: list[MetricPoint] = []
    for s in samples:
        points.append(MetricPoint(
            ts=s.created_at,
            cpu_load=s.cpu_load,
            mem_used_pct=s.mem_used_pct,
            uptime_seconds=s.uptime_seconds,
            internet_ok=s.internet_ok,
            rx_bps=s.rx_bps,
            tx_bps=s.tx_bps,
        ))
    return points
@router.get("/heartbeat")
def heartbeat(
    hours: float = 24,
    bins: int = 48,
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> dict:
    """Per-device status timeline, bucketed into `bins` slots over `hours`.

    Each bucket holds one of:
      - "up"     — at least one sample in range with internet_ok != False
      - "no-net" — at least one sample with internet_ok == False
      - "none"   — no samples covering the bucket
    Within a bucket "no-net" outranks "up".

    NOTE(review): a "down" state is referenced in _promote below but this
    code never assigns it to a bucket — devices without samples stay "none";
    confirm the frontend renderer does not expect "down".
    """
    # Clamp the window to [15 min, 7 days] and the resolution to [6, 288] bins.
    hours = max(0.25, min(float(hours), 24 * 7))
    bins = max(6, min(bins, 288))
    now = datetime.now(timezone.utc)
    since = now - timedelta(hours=hours)
    bin_seconds = (hours * 3600) / bins
    # One sample "paints" a halo of bins around itself so the strip does not
    # look striped when the probe interval exceeds the bin length (e.g. a
    # 1-minute probe against 30-second bins).
    halo_seconds = max(bin_seconds * 1.5, 90.0)
    devices = db.query(Device).order_by(Device.name.asc()).all()
    # Fetch slightly before `since` so halos of earlier samples can still
    # reach into the first bins.
    rows = (
        db.query(DeviceMetric)
        .filter(DeviceMetric.created_at >= since - timedelta(seconds=halo_seconds))
        .order_by(DeviceMetric.created_at.asc())
        .all()
    )
    by_dev: dict[int, list[DeviceMetric]] = {}
    for r in rows:
        by_dev.setdefault(r.device_id, []).append(r)

    # Promotion rule: "no-net" beats everything; "up" only replaces the
    # empty states ("none"/"down"), never "no-net".
    def _promote(cur: str, new: str) -> str:
        if new == "no-net":
            return "no-net"
        if cur in ("none", "down") and new == "up":
            return "up"
        return cur

    out_devices = []
    for dev in devices:
        buckets = ["none"] * bins
        for r in by_dev.get(dev.id, []):
            ts = r.created_at
            if ts.tzinfo is None:
                # Stored timestamps may be naive; treat them as UTC.
                ts = ts.replace(tzinfo=timezone.utc)
            offset = (ts - since).total_seconds()
            # Index range of bins covered by this sample's halo.
            lo = int((offset - halo_seconds) // bin_seconds)
            hi = int((offset + halo_seconds) // bin_seconds)
            new_state = "no-net" if r.internet_ok is False else "up"
            for idx in range(max(0, lo), min(bins, hi + 1)):
                buckets[idx] = _promote(buckets[idx], new_state)
        out_devices.append({
            "id": dev.id,
            "name": dev.identity or dev.name,
            "host": dev.host,
            "status": dev.status,
            "buckets": buckets,
        })
    return {
        "since": since.isoformat(),
        "until": now.isoformat(),
        "bins": bins,
        "hours": hours,
        "devices": out_devices,
    }
+57
View File
@@ -0,0 +1,57 @@
from __future__ import annotations
from typing import Any
from fastapi import APIRouter, Body, Depends
from sqlalchemy.orm import Session
from ...core.db import get_db
from ...models.user import User
from ...services.settings import get_settings_dict, update_settings_dict
from ...services import telegram as tg
from ..deps import get_current_user, require_role
# Router for application settings and the Telegram integration check.
router = APIRouter()
@router.get("")
def get_settings_endpoint(
    db: Session = Depends(get_db),
    _: User = Depends(get_current_user),
) -> dict[str, Any]:
    """Return the persisted application settings.

    Adds `telegram.bot_token_masked` next to the raw bot token so UIs can
    show a redacted preview; the raw token is still returned because admins
    edit it through this same endpoint.
    """
    s = get_settings_dict(db)
    tg_cfg = s.get("telegram", {})
    if tg_cfg.get("bot_token"):
        # Bug fix: the masked dict was previously built and then discarded,
        # so `bot_token_masked` never actually reached the response.
        masked = {**tg_cfg, "bot_token_masked": "***" + tg_cfg["bot_token"][-4:]}
        s = {**s, "telegram": masked}
    return s
@router.put("")
def put_settings_endpoint(
    patch: dict[str, Any] = Body(...),
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> dict[str, Any]:
    """Merge a settings patch and return the full updated settings dict."""
    updated = update_settings_dict(db, patch)
    # A changed probe interval must be applied to the live scheduler job.
    probe_minutes = (updated.get("ui") or {}).get("probe_interval_minutes")
    if isinstance(probe_minutes, int):
        from ...main import reschedule_probe_job
        try:
            reschedule_probe_job(probe_minutes)
        except Exception:  # pragma: no cover
            pass
    return updated
@router.post("/telegram/test")
def telegram_test(
    db: Session = Depends(get_db),
    _: User = Depends(require_role("admin")),
) -> dict[str, Any]:
    """Verify the stored Telegram bot token / chat id by sending a probe."""
    tg_cfg = get_settings_dict(db).get("telegram", {})
    ok, msg = tg.test_credentials(tg_cfg.get("bot_token", ""), tg_cfg.get("chat_id", ""))
    return {"ok": ok, "message": msg}
View File
+68
View File
@@ -0,0 +1,68 @@
from __future__ import annotations
from loguru import logger
from sqlalchemy.orm import Session
from .config import get_settings
from .db import Base, SessionLocal, engine
from .security import hash_password
from ..models.user import User
def init_db() -> None:
    """Create all tables, run lightweight migrations, and seed the admin user."""
    # Imported for their side effect only: each model module registers its
    # table in Base.metadata so create_all() below can see it.
    from ..models import device as _device  # noqa: F401
    from ..models import user as _user  # noqa: F401
    from ..models import backup as _backup  # noqa: F401
    from ..models import firmware as _firmware  # noqa: F401
    from ..models import alert as _alert  # noqa: F401
    from ..models import metric as _metric  # noqa: F401
    from ..models import settings as _settings  # noqa: F401
    from ..models import interface_stat as _ifs  # noqa: F401
    Base.metadata.create_all(bind=engine)
    _ensure_columns()
    _ensure_admin()
def _ensure_columns() -> None:
    """Lightweight ALTER TABLE migrations for compatibility with older DBs."""
    from sqlalchemy import text

    statements = [
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS last_error TEXT",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS internet_ok BOOLEAN",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS last_uptime_seconds INTEGER",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS abnormal_reboot BOOLEAN NOT NULL DEFAULT FALSE",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS last_log_warning TEXT",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS monitored_interfaces TEXT",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS uplink_interfaces TEXT",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS interface_history_hours INTEGER NOT NULL DEFAULT 24",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS kind VARCHAR(16) NOT NULL DEFAULT 'router'",
        "ALTER TABLE devices ADD COLUMN IF NOT EXISTS architecture VARCHAR(32)",
    ]
    with engine.begin() as conn:
        for stmt in statements:
            # Each statement is best-effort; a failure (e.g. on a backend
            # without IF NOT EXISTS support) is logged and skipped.
            try:
                conn.execute(text(stmt))
            except Exception as exc:  # pragma: no cover
                logger.warning("migration failed: {} ({})", stmt, exc)
def _ensure_admin() -> None:
    """Create the bootstrap admin account unless it already exists."""
    cfg = get_settings()
    session: Session = SessionLocal()
    try:
        found = session.query(User).filter(User.email == cfg.bootstrap_admin_email).first()
        if found:
            return
        session.add(User(
            email=cfg.bootstrap_admin_email,
            hashed_password=hash_password(cfg.bootstrap_admin_password),
            role="admin",
            is_active=True,
        ))
        session.commit()
        logger.info("Created bootstrap admin: {}", cfg.bootstrap_admin_email)
    finally:
        session.close()
+49
View File
@@ -0,0 +1,49 @@
from __future__ import annotations
from functools import lru_cache
from typing import List
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Application configuration loaded from environment variables / .env."""

    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    app_env: str = "dev"
    # JWT signing key; also the root for device-secret encryption. Override in prod.
    secret_key: str = "dev-secret-change-me"
    access_token_expire_minutes: int = 60
    refresh_token_expire_days: int = 14
    database_url: str = (
        "postgresql+psycopg2://mikrocloud:mikrocloud@postgres:5432/mikrocloud"
    )
    redis_url: str = "redis://redis:6379/0"
    # MinIO / S3 storage for backups.
    s3_endpoint: str = "http://minio:9000"
    s3_access_key: str = "minio"
    s3_secret_key: str = "minio12345"
    s3_bucket: str = "mikrocloud-backups"
    # Bootstrap admin created on first start.
    bootstrap_admin_email: str = "admin"
    bootstrap_admin_password: str = "admin"
    # Comma-separated origins; parsed by cors_origins_list below.
    cors_origins: str = "http://localhost:5173"
    # Sprint 06: periodic background jobs.
    firmware_check_interval_hours: int = 24
    device_probe_interval_minutes: int = 5
    # Sprint 08: push delivery of backups over the embedded FTP server.
    backup_ftp_host: str = "0.0.0.0"
    backup_ftp_port: int = 2121
    backup_push_host: str = ""  # empty -> auto-detect via detect_push_host()

    @property
    def cors_origins_list(self) -> List[str]:
        """CORS origins split from the comma-separated `cors_origins` string."""
        return [o.strip() for o in self.cors_origins.split(",") if o.strip()]
@lru_cache
def get_settings() -> Settings:
    """Return the process-wide Settings instance (cached after first call)."""
    return Settings()
+29
View File
@@ -0,0 +1,29 @@
from __future__ import annotations
from sqlalchemy import create_engine
from sqlalchemy.orm import DeclarativeBase, Session, sessionmaker
from .config import get_settings
settings = get_settings()
connect_args: dict = {}
if settings.database_url.startswith("sqlite"):
connect_args["check_same_thread"] = False
engine = create_engine(
settings.database_url, pool_pre_ping=True, future=True, connect_args=connect_args
)
SessionLocal = sessionmaker(bind=engine, autoflush=False, autocommit=False)
class Base(DeclarativeBase):
pass
def get_db():
    """FastAPI dependency: yield a DB session, always closing it afterwards."""
    session: Session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
+76
View File
@@ -0,0 +1,76 @@
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from typing import Any
from cryptography.fernet import Fernet
from jose import JWTError, jwt
from passlib.context import CryptContext
from .config import get_settings
settings = get_settings()
# bcrypt for password hashes; "deprecated=auto" re-hashes legacy schemes on verify.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# JWT signing algorithm (symmetric, keyed by settings.secret_key).
ALGORITHM = "HS256"
def hash_password(password: str) -> str:
    """Hash a plaintext password with the configured bcrypt context."""
    hashed = pwd_context.hash(password)
    return hashed
def verify_password(plain: str, hashed: str) -> bool:
    """Check a plaintext password against its stored bcrypt hash."""
    result = pwd_context.verify(plain, hashed)
    return result
def _now() -> datetime:
return datetime.now(timezone.utc)
def create_access_token(subject: str | int, extra: dict[str, Any] | None = None) -> str:
    """Issue a short-lived JWT access token for *subject*, merging *extra* claims."""
    claims: dict[str, Any] = {
        "sub": str(subject),
        "type": "access",
        "iat": _now(),
        "exp": _now() + timedelta(minutes=settings.access_token_expire_minutes),
    }
    if extra:
        claims.update(extra)
    token = jwt.encode(claims, settings.secret_key, algorithm=ALGORITHM)
    return token
def create_refresh_token(subject: str | int) -> str:
    """Issue a long-lived JWT refresh token for *subject*."""
    claims = {
        "sub": str(subject),
        "type": "refresh",
        "iat": _now(),
        "exp": _now() + timedelta(days=settings.refresh_token_expire_days),
    }
    token = jwt.encode(claims, settings.secret_key, algorithm=ALGORITHM)
    return token
def decode_token(token: str) -> dict[str, Any]:
    """Decode and validate a JWT; raise ValueError on any JWT problem."""
    try:
        payload = jwt.decode(token, settings.secret_key, algorithms=[ALGORITHM])
    except JWTError as exc:  # pragma: no cover
        raise ValueError(f"invalid token: {exc}") from exc
    return payload
# --- Symmetric encryption of device secrets ----------------------------------
# Key derived from SECRET_KEY (dev convenience). In production use KMS / Vault.
def _fernet() -> Fernet:
    """Build a Fernet cipher keyed by SHA-256 of the application secret key."""
    import base64
    import hashlib

    raw = hashlib.sha256(settings.secret_key.encode()).digest()
    # Fernet requires a 32-byte urlsafe-base64 key.
    return Fernet(base64.urlsafe_b64encode(raw))
def encrypt_secret(value: str) -> str:
    """Encrypt a device secret; returns a Fernet token string."""
    token = _fernet().encrypt(value.encode())
    return token.decode()
def decrypt_secret(token: str) -> str:
    """Decrypt a Fernet token produced by encrypt_secret()."""
    plain = _fernet().decrypt(token.encode())
    return plain.decode()
+232
View File
@@ -0,0 +1,232 @@
from __future__ import annotations
from contextlib import asynccontextmanager
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from .api.router import api_router
from .core.bootstrap import init_db
from .core.config import get_settings
from .core.db import SessionLocal
def _job_firmware_check() -> None:
    """Scheduled job: poll upstream for new RouterOS releases and raise alerts."""
    from .services.firmware_check import check_and_alert

    session = SessionLocal()
    try:
        check_and_alert(session)
    except Exception as exc:  # pragma: no cover
        logger.warning("firmware check job failed: {}", exc)
    finally:
        session.close()
def _job_probe_devices() -> None:
    """Scheduled job: poll every device, update status/metrics, raise alerts.

    For each device this:
      - fetches /system/resource and the identity; on a RouterOS error the
        device is marked "down" (alerting only on the up->down transition);
      - detects abnormal reboots via a shrinking uptime counter;
      - records a DeviceMetric sample (CPU, memory, uptime, internet check);
      - samples rx/tx counters for the monitored/uplink interfaces and prunes
        samples older than the per-device retention window.
    """
    from .models.device import Device
    from .models.metric import DeviceMetric
    from .models.interface_stat import InterfaceStat
    from .core.security import decrypt_secret
    from .services.events import add_alert
    from .services.routeros.client import (
        RouterOSCredentials, RouterOSError, check_internet,
        fetch_identity, fetch_interface_stats, fetch_resource, parse_uptime,
    )
    from datetime import datetime, timedelta, timezone
    db = SessionLocal()
    try:
        for d in db.query(Device).all():
            creds = RouterOSCredentials(
                host=d.host, username=d.username,
                password=decrypt_secret(d.password_enc),
                port=d.port, use_tls=d.use_tls, timeout=5.0,
            )
            try:
                res = fetch_resource(creds)
                ident = fetch_identity(creds)
            except RouterOSError as exc:
                # Alert only on the up->down transition, not on every failed poll.
                if d.status != "down":
                    add_alert(db, severity="error", category="device",
                              source=f"device:{d.id}",
                              title=f"Устройство недоступно: {d.identity or d.name}",
                              message=str(exc))
                d.status = "down"
                d.last_error = str(exc)
                db.commit()
                continue
            # Refresh metadata from the live answer; keep old values on blanks.
            d.identity = ident or d.identity
            d.model = res.get("board-name") or d.model
            d.ros_version = res.get("version") or d.ros_version
            d.architecture = res.get("architecture-name") or d.architecture
            prev_status = d.status
            d.status = "up"
            d.last_error = None
            d.last_seen = datetime.now(timezone.utc)
            # Abnormal-reboot detection: uptime dropped by more than a minute.
            uptime_s = parse_uptime(res.get("uptime"))
            if uptime_s is not None and d.last_uptime_seconds is not None:
                if uptime_s < d.last_uptime_seconds - 60:
                    d.abnormal_reboot = True
                    add_alert(db, severity="warning", category="abnormal_reboot",
                              source=f"device:{d.id}",
                              title=f"Аварийный перезапуск: {d.identity or d.name}",
                              message=f"uptime {d.last_uptime_seconds}s → {uptime_s}s")
                else:
                    d.abnormal_reboot = False
            d.last_uptime_seconds = uptime_s
            try:
                d.internet_ok = check_internet(creds)
            except Exception:
                # The internet check is best-effort; "unknown" on any failure.
                d.internet_ok = None
            if prev_status == "down":
                add_alert(db, severity="info", category="device",
                          source=f"device:{d.id}",
                          title=f"Устройство снова онлайн: {d.identity or d.name}")

            # Tolerant int parsing for RouterOS string counters.
            def _i(v):
                try: return int(v) if v is not None else None
                except: return None  # noqa: E722

            cpu = _i(res.get("cpu-load"))
            free_mem = _i(res.get("free-memory"))
            total_mem = _i(res.get("total-memory"))
            mem_pct = None
            if free_mem is not None and total_mem and total_mem > 0:
                mem_pct = round(100 - (free_mem / total_mem) * 100, 1)
            db.add(DeviceMetric(
                device_id=d.id,
                cpu_load=float(cpu) if cpu is not None else None,
                mem_used_pct=mem_pct,
                free_memory=free_mem, total_memory=total_mem,
                uptime_seconds=uptime_s, internet_ok=d.internet_ok,
            ))
            # ---- Sprint 09: counters for the selected interfaces ----
            # Union of the monitored and uplink interface CSV lists.
            mon = (d.monitored_interfaces or "").strip()
            up = (d.uplink_interfaces or "").strip()
            wanted = {x.strip() for x in mon.split(",") if x.strip()}
            wanted |= {x.strip() for x in up.split(",") if x.strip()}
            if wanted:
                try:
                    iface_rows = fetch_interface_stats(creds)
                    now_ts = datetime.now(timezone.utc)
                    for r in iface_rows:
                        if r["name"] in wanted:
                            db.add(InterfaceStat(
                                device_id=d.id, name=r["name"],
                                rx_bytes=r["rx_bytes"], tx_bytes=r["tx_bytes"],
                                running=r["running"], ts=now_ts,
                            ))
                    # Retention: drop samples older than the configured depth.
                    keep_hours = int(d.interface_history_hours or 24)
                    cutoff = now_ts - timedelta(hours=keep_hours)
                    db.query(InterfaceStat).filter(
                        InterfaceStat.device_id == d.id,
                        InterfaceStat.ts < cutoff,
                    ).delete(synchronize_session=False)
                except RouterOSError as exc:
                    logger.debug("iface stats failed for {}: {}", d.host, exc)
            db.commit()
    except Exception as exc:  # pragma: no cover
        logger.warning("probe job failed: {}", exc)
    finally:
        db.close()
# Handle to the running scheduler; set in lifespan(), None before startup.
_scheduler: AsyncIOScheduler | None = None

# Permitted auto-probe intervals (minutes), used for clamping/validation.
ALLOWED_PROBE_MINUTES: tuple[int, ...] = (1, 2, 3, 5, 10)


def reschedule_probe_job(minutes: int) -> int:
    """Re-arm the probe_devices job with a new interval; return the applied value.

    Values outside ALLOWED_PROBE_MINUTES are clamped to the nearest allowed
    value below them (or to the smallest allowed value). When the scheduler
    has not started yet, only the clamped value is returned.
    """
    # The original declared `global _scheduler` although the variable is only
    # read here; the unnecessary declaration was removed.
    if minutes not in ALLOWED_PROBE_MINUTES:
        # Nearest allowed value from below.
        minutes = max((m for m in ALLOWED_PROBE_MINUTES if m <= minutes), default=ALLOWED_PROBE_MINUTES[0])
    if _scheduler is None:
        return minutes
    _scheduler.reschedule_job("probe_devices", trigger="interval", minutes=minutes)
    logger.info("probe_devices job rescheduled: every {}m", minutes)
    return minutes
@asynccontextmanager
async def lifespan(_: FastAPI):
    """FastAPI lifespan: initialize the DB, start the push-backup FTP server
    and the APScheduler jobs on startup; tear everything down on shutdown."""
    global _scheduler
    settings = get_settings()
    logger.info("Starting ROSzetta API ({} env)", settings.app_env)
    init_db()
    # FTP server accepting push backups from MikroTik devices (best-effort:
    # the API still starts if the FTP server cannot bind).
    try:
        from .services.backup_ftp_server import start_server
        start_server(host=settings.backup_ftp_host, port=settings.backup_ftp_port)
    except Exception as exc:  # pragma: no cover
        logger.warning("Backup FTP server failed to start: {}", exc)
    # Initial probe interval: a DB-persisted UI setting wins over the env default.
    probe_minutes = settings.device_probe_interval_minutes
    try:
        from .services.settings import get_settings_dict
        db = SessionLocal()
        try:
            s = get_settings_dict(db)
            ui_pm = (s.get("ui") or {}).get("probe_interval_minutes")
            if isinstance(ui_pm, int) and ui_pm in ALLOWED_PROBE_MINUTES:
                probe_minutes = ui_pm
        finally:
            db.close()
    except Exception as exc:  # pragma: no cover
        logger.warning("could not load probe interval from settings: {}", exc)
    _scheduler = AsyncIOScheduler(timezone="UTC")
    from datetime import datetime, timedelta, timezone
    now = datetime.now(timezone.utc)
    # First runs are staggered shortly after boot rather than waiting a full
    # interval: firmware check after 30s, device probe after 10s.
    _scheduler.add_job(
        _job_firmware_check, "interval",
        hours=max(1, settings.firmware_check_interval_hours),
        id="firmware_check",
        next_run_time=now + timedelta(seconds=30),
    )
    _scheduler.add_job(
        _job_probe_devices, "interval",
        minutes=max(1, probe_minutes),
        id="probe_devices",
        next_run_time=now + timedelta(seconds=10),
    )
    _scheduler.start()
    logger.info("Scheduler started: firmware/{}h, probe/{}m",
                settings.firmware_check_interval_hours, probe_minutes)
    yield
    # ---- Shutdown: stop the scheduler, then the FTP server (best-effort). ----
    if _scheduler:
        _scheduler.shutdown(wait=False)
    try:
        from .services.backup_ftp_server import stop_server
        stop_server()
    except Exception:  # pragma: no cover
        pass
    logger.info("Shutting down")
def create_app() -> FastAPI:
    """Build the FastAPI application with CORS and the API router attached."""
    cfg = get_settings()
    application = FastAPI(
        title="ROSzetta API",
        version="0.1.0",
        lifespan=lifespan,
    )
    application.add_middleware(
        CORSMiddleware,
        allow_origins=cfg.cors_origins_list,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    application.include_router(api_router)
    return application


# Module-level ASGI application used by uvicorn ("app.main:app").
app = create_app()
View File
+23
View File
@@ -0,0 +1,23 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import Boolean, DateTime, Integer, String, Text, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class Alert(Base):
    """A single system alert/notification row."""

    __tablename__ = "alerts"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # One of: info | warning | error | critical.
    severity: Mapped[str] = mapped_column(String(16), nullable=False, default="info")
    # One of: firmware | backup | device | security | system.
    category: Mapped[str] = mapped_column(String(32), nullable=False, default="system")
    # Origin marker, e.g. "device:<id>" or a module name.
    source: Mapped[str | None] = mapped_column(String(64))
    title: Mapped[str] = mapped_column(String(255), nullable=False)
    message: Mapped[str | None] = mapped_column(Text)
    # Set once an operator has acknowledged the alert in the UI.
    acknowledged: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
+25
View File
@@ -0,0 +1,25 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import DateTime, ForeignKey, Integer, LargeBinary, String, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class DeviceBackup(Base):
    """A stored backup blob pulled from (or pushed by) a device."""

    __tablename__ = "device_backups"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    device_id: Mapped[int] = mapped_column(
        Integer, ForeignKey("devices.id", ondelete="CASCADE"), index=True, nullable=False
    )
    filename: Mapped[str] = mapped_column(String(255), nullable=False)
    # 'binary' (.backup) or 'text' (.rsc).
    fmt: Mapped[str] = mapped_column(String(16), nullable=False)
    size: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    # Raw backup payload stored inline in the database.
    content: Mapped[bytes] = mapped_column(LargeBinary, nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
+49
View File
@@ -0,0 +1,49 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import DateTime, Integer, String, Text, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class Device(Base):
    """A managed RouterOS device (router or switch)."""

    __tablename__ = "devices"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    name: Mapped[str] = mapped_column(String(128), nullable=False)
    # 'router' | 'switch' — splits devices between the Devices / Switches sections.
    kind: Mapped[str] = mapped_column(String(16), default="router", nullable=False)
    host: Mapped[str] = mapped_column(String(255), nullable=False, index=True)
    # Default 8729 — presumably the RouterOS api-ssl port, matching use_tls=True.
    port: Mapped[int] = mapped_column(Integer, default=8729, nullable=False)
    use_tls: Mapped[bool] = mapped_column(default=True, nullable=False)
    username: Mapped[str] = mapped_column(String(64), nullable=False)
    # Encrypted with core.security.encrypt_secret.
    password_enc: Mapped[str] = mapped_column(Text, nullable=False)
    # Metadata read back from the device.
    identity: Mapped[str | None] = mapped_column(String(128))
    model: Mapped[str | None] = mapped_column(String(64))
    serial: Mapped[str | None] = mapped_column(String(64))
    ros_version: Mapped[str | None] = mapped_column(String(32))
    # RouterOS platform architecture: arm64 / arm / mipsbe / mmips / mipsle /
    # smips / tile / ppc / x86 / x86_64.
    architecture: Mapped[str | None] = mapped_column(String(32))
    status: Mapped[str] = mapped_column(String(16), default="unknown", nullable=False)
    last_error: Mapped[str | None] = mapped_column(Text)
    last_seen: Mapped[datetime | None] = mapped_column(DateTime(timezone=True))
    # Sprint 06: monitoring state.
    internet_ok: Mapped[bool | None] = mapped_column()
    last_uptime_seconds: Mapped[int | None] = mapped_column(Integer)
    abnormal_reboot: Mapped[bool] = mapped_column(default=False, nullable=False)
    last_log_warning: Mapped[str | None] = mapped_column(Text)
    # Sprint 09 — interface monitoring.
    # CSV list of interface names to collect rx/tx charts for.
    monitored_interfaces: Mapped[str | None] = mapped_column(Text)
    # CSV list of uplinks (e.g. ISP/LTE): drives the "internet on interface X" indicator.
    uplink_interfaces: Mapped[str | None] = mapped_column(Text)
    # Retention depth for interface statistics, in hours.
    interface_history_hours: Mapped[int] = mapped_column(Integer, default=24, nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
+25
View File
@@ -0,0 +1,25 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import DateTime, Integer, LargeBinary, String, Text, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class Firmware(Base):
    """RouterOS firmware package stored inline in the database.

    Raw package bytes live in ``content``; the other columns are metadata
    describing the target platform and where the image came from.
    """

    __tablename__ = "firmwares"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # Display name of the stored image.
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    version: Mapped[str | None] = mapped_column(String(64))
    # Target platform architecture (arm64 / mipsbe / x86_64 / ...).
    architecture: Mapped[str | None] = mapped_column(String(32))
    channel: Mapped[str | None] = mapped_column(String(32))  # stable/long-term/testing
    # Size of `content` in bytes. NOTE(review): Integer is 32-bit in Postgres;
    # fine for RouterOS packages, revisit if images over ~2 GiB ever appear.
    size: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    sha256: Mapped[str | None] = mapped_column(String(64))
    # URL the image was imported from, when fetched remotely.
    source_url: Mapped[str | None] = mapped_column(Text)
    # Raw package bytes.
    content: Mapped[bytes] = mapped_column(LargeBinary, nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
+34
View File
@@ -0,0 +1,34 @@
"""Метрики интерфейсов: счётчики rx/tx и состояние running.
Фиксируется значение счётчиков (монотонно растущих, до перезагрузки),
во время каждого probe-цикла. На фронте берутся последние ~N точек,
для отрисовки графика bps вычисляется (delta/seconds).
"""
from __future__ import annotations
from datetime import datetime
from sqlalchemy import BigInteger, Boolean, DateTime, ForeignKey, Index, Integer, String, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class InterfaceStat(Base):
    """One sample of an interface's cumulative rx/tx byte counters.

    A row is appended per monitored interface on every probe cycle; the
    frontend derives bps from the delta between consecutive samples.
    """

    __tablename__ = "interface_stats"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # Owning device; rows are purged with the device (ON DELETE CASCADE).
    device_id: Mapped[int] = mapped_column(
        ForeignKey("devices.id", ondelete="CASCADE"), index=True, nullable=False
    )
    # Interface name as reported by RouterOS.
    name: Mapped[str] = mapped_column(String(64), nullable=False)
    # Cumulative counters; monotonically increasing until the device reboots.
    rx_bytes: Mapped[int] = mapped_column(BigInteger, nullable=False, default=0)
    tx_bytes: Mapped[int] = mapped_column(BigInteger, nullable=False, default=0)
    # Link "running" state at sample time.
    running: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
    # Sample timestamp (server-side now()).
    ts: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False, index=True
    )
    # Composite index for "history of one interface on one device" queries.
    __table_args__ = (
        Index("ix_iface_stats_dev_name_ts", "device_id", "name", "ts"),
    )
+32
View File
@@ -0,0 +1,32 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import DateTime, Float, ForeignKey, Index, Integer, String, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class DeviceMetric(Base):
    """Periodic health sample for a device (one row per probe cycle)."""

    __tablename__ = "device_metrics"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    device_id: Mapped[int] = mapped_column(
        ForeignKey("devices.id", ondelete="CASCADE"), nullable=False, index=True
    )
    cpu_load: Mapped[float | None] = mapped_column(Float)
    mem_used_pct: Mapped[float | None] = mapped_column(Float)
    # NOTE(review): Integer (32-bit) caps byte counts near 2 GiB; devices with
    # more RAM or links above ~2 Gbps would overflow — consider BigInteger.
    free_memory: Mapped[int | None] = mapped_column(Integer)
    total_memory: Mapped[int | None] = mapped_column(Integer)
    uptime_seconds: Mapped[int | None] = mapped_column(Integer)
    # None means the check was not performed this cycle.
    internet_ok: Mapped[bool | None] = mapped_column()
    rx_bps: Mapped[int | None] = mapped_column(Integer)
    tx_bps: Mapped[int | None] = mapped_column(Integer)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False, index=True
    )
    # Composite index for per-device time-range queries.
    __table_args__ = (
        Index("ix_device_metrics_device_time", "device_id", "created_at"),
    )
+19
View File
@@ -0,0 +1,19 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import DateTime, Integer, String, Text, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class AppSetting(Base):
    """Key/value store for application settings; values are JSON strings."""

    __tablename__ = "app_settings"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # Unique setting name, e.g. "firmware_state".
    key: Mapped[str] = mapped_column(String(64), unique=True, nullable=False)
    # JSON-encoded payload; defaults to an empty object.
    value: Mapped[str] = mapped_column(Text, nullable=False, default="{}")
    # Touched automatically on every UPDATE.
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False
    )
+21
View File
@@ -0,0 +1,21 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import Boolean, DateTime, Integer, String, func
from sqlalchemy.orm import Mapped, mapped_column
from ..core.db import Base
class User(Base):
    """Application account used for API authentication/authorization."""

    __tablename__ = "users"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    email: Mapped[str] = mapped_column(String(255), unique=True, index=True, nullable=False)
    # Password hash only; plaintext is never stored.
    hashed_password: Mapped[str] = mapped_column(String(255), nullable=False)
    # Role string; defaults to the least-privileged "viewer".
    role: Mapped[str] = mapped_column(String(32), default="viewer", nullable=False)
    # Inactive accounts are kept but should not authenticate.
    is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
View File
+19
View File
@@ -0,0 +1,19 @@
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel
class AlertOut(BaseModel):
    """API representation of an alert row (built from ORM objects)."""

    id: int
    severity: str
    category: str
    source: str | None = None
    title: str
    message: str | None = None
    acknowledged: bool
    created_at: datetime

    # Pydantic v2 style: a plain-dict model_config replaces the
    # deprecated class-based `Config`.
    model_config = {"from_attributes": True}
+31
View File
@@ -0,0 +1,31 @@
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel
class UserOut(BaseModel):
    """Public user representation (never exposes the password hash)."""

    id: int
    email: str
    role: str
    is_active: bool
    created_at: datetime

    # Pydantic v2 style: a plain-dict model_config replaces the
    # deprecated class-based `Config`.
    model_config = {"from_attributes": True}
class LoginIn(BaseModel):
    """Credentials payload for the login endpoint."""

    email: str
    password: str
class TokenPair(BaseModel):
    """Access/refresh token pair returned by login and refresh."""

    access_token: str
    refresh_token: str
    token_type: str = "bearer"
class RefreshIn(BaseModel):
    """Request body carrying the refresh token to exchange."""

    refresh_token: str
+17
View File
@@ -0,0 +1,17 @@
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel
class BackupOut(BaseModel):
    """API view of one stored device backup file."""

    id: int
    device_id: int
    filename: str
    # File format marker — NOTE(review): confirm value set against the Backup model.
    fmt: str
    size: int
    created_at: datetime

    # Pydantic v2 style: a plain-dict model_config replaces the
    # deprecated class-based `Config`.
    model_config = {"from_attributes": True}
+65
View File
@@ -0,0 +1,65 @@
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel, Field
class DeviceBase(BaseModel):
    """Editable device fields shared by create/read schemas."""

    name: str = Field(min_length=1, max_length=128)
    host: str
    # RouterOS API port; 8729 is api-ssl.
    port: int = 8729
    use_tls: bool = True
    username: str
    # 'router' | 'switch' — which UI section the device is listed in.
    kind: str = "router"
class DeviceCreate(DeviceBase):
    """Registration payload; the plaintext password is encrypted server-side
    before storage (see Device.password_enc)."""

    password: str
class DeviceUpdate(BaseModel):
    """Partial update: every field optional; None means "leave unchanged"."""

    name: str | None = None
    host: str | None = None
    port: int | None = None
    use_tls: bool | None = None
    username: str | None = None
    password: str | None = None
    kind: str | None = None
    # CSV list of interface names to collect rx/tx graphs for.
    monitored_interfaces: str | None = None
    # CSV list of uplink interface names.
    uplink_interfaces: str | None = None
    # Retention depth for interface stats, in hours.
    interface_history_hours: int | None = None
class DeviceOut(DeviceBase):
    """Full device representation returned by the API (built from ORM rows)."""

    id: int
    identity: str | None = None
    model: str | None = None
    serial: str | None = None
    ros_version: str | None = None
    architecture: str | None = None
    status: str
    last_error: str | None = None
    last_seen: datetime | None = None
    internet_ok: bool | None = None
    last_uptime_seconds: int | None = None
    abnormal_reboot: bool = False
    last_log_warning: str | None = None
    # CSV lists of interface names (mirrors the Device model columns).
    monitored_interfaces: str | None = None
    uplink_interfaces: str | None = None
    interface_history_hours: int = 24
    created_at: datetime

    # Pydantic v2 style: a plain-dict model_config replaces the
    # deprecated class-based `Config`.
    model_config = {"from_attributes": True}
class DeviceResource(BaseModel):
    """Snapshot of the `/system/resource` output."""

    cpu_load: int | None = None
    free_memory: int | None = None
    total_memory: int | None = None
    # Raw RouterOS uptime string, e.g. "1w2d3h4m5s".
    uptime: str | None = None
    version: str | None = None
    board_name: str | None = None
    architecture_name: str | None = None
+55
View File
@@ -0,0 +1,55 @@
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel, Field, HttpUrl
class FirmwareImportIn(BaseModel):
    """Import one firmware image from a URL, with optional metadata overrides."""

    url: HttpUrl
    name: str | None = None
    version: str | None = None
    architecture: str | None = None
    channel: str | None = None
class FirmwareBulkImportIn(BaseModel):
    """Bulk-import one RouterOS version for several architectures."""

    version: str = Field(..., description="Например: 7.16.1")
    channel: str | None = "stable"
    # At least one target architecture is required.
    architectures: list[str] = Field(..., min_length=1)
class FirmwareBulkResult(BaseModel):
    """Outcome for one architecture within a bulk import."""

    architecture: str
    ok: bool
    firmware_id: int | None = None
    error: str | None = None
    # NOTE(review): presumably True when the image already existed — confirm
    # against the import service.
    skipped: bool = False
class FirmwareBulkOut(BaseModel):
    """Aggregate response for a bulk import: one result per architecture."""

    version: str
    channel: str | None
    results: list[FirmwareBulkResult]
class FirmwareUpdateIn(BaseModel):
    """Partial metadata update for a stored firmware image."""

    name: str | None = Field(default=None, max_length=255)
    version: str | None = None
    architecture: str | None = None
    channel: str | None = None
class FirmwareOut(BaseModel):
    """API view of a stored firmware image (metadata only, no content bytes)."""

    id: int
    name: str
    version: str | None
    architecture: str | None
    channel: str | None
    size: int
    sha256: str | None
    source_url: str | None
    created_at: datetime

    # Pydantic v2 style: a plain-dict model_config replaces the
    # deprecated class-based `Config`.
    model_config = {"from_attributes": True}
+15
View File
@@ -0,0 +1,15 @@
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel
class MetricPoint(BaseModel):
    """One time-series point of device metrics, as charted by the frontend."""

    ts: datetime
    cpu_load: float | None = None
    mem_used_pct: float | None = None
    uptime_seconds: int | None = None
    internet_ok: bool | None = None
    rx_bps: int | None = None
    tx_bps: int | None = None
View File
+215
View File
@@ -0,0 +1,215 @@
"""Встроенный FTP-сервер для приёма push-бэкапов от MikroTik.
Идея: вместо того чтобы открывать ssh/ftp на каждом устройстве и тянуть
с него файл, контроллер сам поднимает FTP на отдельном порту и выдаёт
устройству одноразовые креды. Устройство выполняет:
/tool fetch upload=yes mode=ftp address=<ctrl> port=<p> \
user=<u> password=<p> src-path=<file> dst-path=<file>
Файлы складываются во временную директорию сессии. По завершении
загрузки коллбэк `on_file_received` маркирует файл как готовый.
Бэкенд ждёт появления всех ожидаемых файлов и читает их.
Реализация — `pyftpdlib.servers.ThreadedFTPServer`, поднимается
в фоновом потоке и живёт вместе с процессом backend.
"""
from __future__ import annotations
import os
import secrets
import shutil
import socket
import tempfile
import threading
import time
from dataclasses import dataclass, field
from typing import Iterable
from loguru import logger
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import ThreadedFTPServer
@dataclass
class _Session:
    """One push-backup FTP session: a throwaway user with a private directory."""

    session_id: str
    username: str
    password: str
    home_dir: str
    # File names the device is expected to upload.
    expected: set[str]
    received: dict[str, str] = field(default_factory=dict)  # name -> abs path
    created_at: float = field(default_factory=time.time)
class _Server:
    """Embedded FTP server accepting push-uploads of device backups.

    One process-wide instance. Each backup run opens an isolated session
    (one-off user + private subdirectory) and waits for the expected files.
    All session bookkeeping is guarded by a re-entrant lock because
    pyftpdlib handler callbacks run on server threads.
    """

    def __init__(self, host: str = "0.0.0.0", port: int = 2121) -> None:
        self.host = host
        self.port = port
        # session_id -> session, and username -> session (handler lookups).
        self._sessions: dict[str, _Session] = {}
        self._sessions_by_user: dict[str, _Session] = {}
        self._lock = threading.RLock()
        self._authorizer = DummyAuthorizer()
        self._server: ThreadedFTPServer | None = None
        self._thread: threading.Thread | None = None
        # Root temp dir; each session gets its own subdirectory.
        self._root_tmp = tempfile.mkdtemp(prefix="mikbak-ftp-")
        srv = self  # closure for the handler class
        class _Handler(FTPHandler):
            def on_file_received(self, file: str) -> None:  # type: ignore[override]
                # pyftpdlib callback fired when an upload completes.
                try:
                    user = (self.username or "").strip()
                    name = os.path.basename(file)
                    srv._mark_received(user, name, file)
                except Exception as exc:  # pragma: no cover
                    logger.warning("FTP on_file_received error: {}", exc)
        _Handler.authorizer = self._authorizer
        _Handler.banner = "mikrocloud backup ftp ready"
        # Fixed passive-mode port range (must be opened in compose/firewall).
        _Handler.passive_ports = range(30000, 30050)
        self._handler_cls = _Handler

    # ---------- lifecycle ----------
    def start(self) -> None:
        """Start serving in a daemon thread; no-op if already running."""
        if self._server is not None:
            return
        self._server = ThreadedFTPServer((self.host, self.port), self._handler_cls)
        self._server.max_cons = 64
        self._thread = threading.Thread(
            target=self._server.serve_forever,
            name="backup-ftp",
            daemon=True,
        )
        self._thread.start()
        logger.info("Backup FTP server started on {}:{}", self.host, self.port)

    def stop(self) -> None:
        """Stop the server and remove the temp tree; safe to call twice."""
        if self._server is None:
            return
        try:
            self._server.close_all()
        except Exception:  # pragma: no cover
            pass
        self._server = None
        self._thread = None
        try:
            shutil.rmtree(self._root_tmp, ignore_errors=True)
        except Exception:  # pragma: no cover
            pass
        logger.info("Backup FTP server stopped")

    # ---------- sessions ----------
    def open_session(self, expected_files: Iterable[str]) -> _Session:
        """Create a unique FTP user with a private home directory."""
        with self._lock:
            sid = secrets.token_hex(8)
            user = f"mb_{sid}"
            password = secrets.token_urlsafe(18)
            home = os.path.join(self._root_tmp, sid)
            os.makedirs(home, exist_ok=True)
            self._authorizer.add_user(user, password, home, perm="elradfmw")
            sess = _Session(
                session_id=sid,
                username=user,
                password=password,
                home_dir=home,
                expected=set(expected_files),
            )
            self._sessions[sid] = sess
            self._sessions_by_user[user] = sess
            logger.info("FTP backup session opened: sid={} user={} expected={}",
                        sid, user, sess.expected)
            return sess

    def close_session(self, session_id: str) -> None:
        """Drop the session's FTP user and wipe its upload directory."""
        with self._lock:
            sess = self._sessions.pop(session_id, None)
            if sess is None:
                return
            self._sessions_by_user.pop(sess.username, None)
            try:
                self._authorizer.remove_user(sess.username)
            except Exception:  # pragma: no cover
                pass
            try:
                shutil.rmtree(sess.home_dir, ignore_errors=True)
            except Exception:  # pragma: no cover
                pass
            logger.info("FTP backup session closed: sid={}", session_id)

    def _mark_received(self, username: str, name: str, abs_path: str) -> None:
        """Record a completed upload for the owning session (handler thread)."""
        with self._lock:
            sess = self._sessions_by_user.get(username)
            if sess is None:
                logger.warning("FTP upload from unknown user: {} ({})", username, name)
                return
            sess.received[name] = abs_path
            logger.info("FTP backup file received: sid={} name={} size={}b",
                        sess.session_id, name, os.path.getsize(abs_path))

    def wait_files(self, session_id: str, timeout: float = 60.0) -> dict[str, bytes]:
        """Block until all expected files arrive; return {name: bytes}.

        Raises RuntimeError if the session vanished and TimeoutError (with
        the missing names) when *timeout* elapses.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            with self._lock:
                sess = self._sessions.get(session_id)
                if sess is None:
                    raise RuntimeError(f"session {session_id} not found")
                missing = sess.expected - set(sess.received.keys())
                if not missing:
                    out: dict[str, bytes] = {}
                    for name, path in sess.received.items():
                        with open(path, "rb") as f:
                            out[name] = f.read()
                    return out
            time.sleep(0.3)
        with self._lock:
            sess = self._sessions.get(session_id)
            missing = sess.expected - set(sess.received.keys()) if sess else set()
        raise TimeoutError(f"backup files not received: missing={sorted(missing)}")
_INSTANCE: _Server | None = None
_INSTANCE_LOCK = threading.Lock()
def get_server() -> _Server | None:
    """Return the process-wide FTP server instance, or None if not started."""
    return _INSTANCE
def start_server(host: str = "0.0.0.0", port: int = 2121) -> _Server:
    """Create and start the singleton FTP server (thread-safe, idempotent)."""
    global _INSTANCE
    with _INSTANCE_LOCK:
        if _INSTANCE is None:
            _INSTANCE = _Server(host=host, port=port)
            _INSTANCE.start()
        return _INSTANCE
def stop_server() -> None:
    """Stop and forget the singleton FTP server, if any (thread-safe)."""
    global _INSTANCE
    with _INSTANCE_LOCK:
        if _INSTANCE is not None:
            _INSTANCE.stop()
            _INSTANCE = None
def detect_push_host(default: str | None = None) -> str:
"""Подсказка: IP контроллера, как его видят устройства.
Берётся через udp-сокет к 8.8.8.8 (соединение не открывается).
Используется fallback, если в ENV не задан BACKUP_PUSH_HOST.
"""
if default:
return default
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(0.3)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
except Exception:
return "0.0.0.0"
+150
View File
@@ -0,0 +1,150 @@
"""Бэкап самого контроллера: дамп БД и/или конфигурации."""
from __future__ import annotations
import io
import json
import os
import subprocess
import tarfile
from datetime import datetime, timezone
from loguru import logger
from ..core.config import get_settings
def _safe_settings_dump() -> dict:
    """Settings as a dict with secret-looking values masked out."""
    raw = get_settings().model_dump()
    sensitive = ("password", "secret", "key")
    return {
        name: ("***" if any(marker in name.lower() for marker in sensitive) else value)
        for name, value in raw.items()
    }
def make_config_only_archive() -> tuple[str, bytes]:
    """Build a tar.gz with the controller settings only (no DB dump).

    Returns (archive_name, archive_bytes). Settings go through
    _safe_settings_dump(), so no secrets end up in the archive.
    """
    buf = io.BytesIO()
    # One aware UTC instant for both the archive name and member mtimes
    # (the original mixed aware and naive datetime.now() calls).
    now = datetime.now(timezone.utc)
    name = f"controller-config-{now.strftime('%Y%m%d-%H%M%S')}.tar.gz"
    mtime = int(now.timestamp())
    settings_json = json.dumps(_safe_settings_dump(), indent=2, default=str).encode()
    readme = (
        b"ROSzetta - config-only backup\n"
        b"Contains masked settings.json (no DB, no secrets).\n"
    )
    with tarfile.open(fileobj=buf, mode="w:gz") as tar:
        # Same member-adding loop shape as make_full_archive, for consistency.
        for fname, data in (("settings.json", settings_json), ("README.txt", readme)):
            info = tarfile.TarInfo(name=fname)
            info.size = len(data)
            info.mtime = mtime
            tar.addfile(info, io.BytesIO(data))
    return name, buf.getvalue()
def _dump_database() -> bytes:
    """Return a pg_dump of the DB in custom format; raise RuntimeError on failure."""
    s = get_settings()
    # pg_dump expects a plain postgresql:// URL, not the SQLAlchemy driver form.
    url = s.database_url.replace("postgresql+psycopg2://", "postgresql://")
    cmd = ["pg_dump", "-Fc", url]
    logger.info("running pg_dump")
    try:
        out = subprocess.run(
            cmd,
            check=True,
            capture_output=True,
            timeout=300,
            env={**os.environ},
        )
    except FileNotFoundError as exc:
        raise RuntimeError("pg_dump not installed in backend image") from exc
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f"pg_dump failed: {exc.stderr.decode(errors='replace')[:400]}") from exc
    return out.stdout
def make_full_archive() -> tuple[str, bytes]:
    """Build a tar.gz with a pg_dump of the DB plus masked settings.json.

    Returns (archive_name, archive_bytes). Raises RuntimeError when
    pg_dump is missing or fails (propagated from _dump_database).
    """
    buf = io.BytesIO()
    # One aware UTC instant for both the archive name and member mtimes
    # (the original mixed aware and naive datetime.now() calls).
    now = datetime.now(timezone.utc)
    name = f"controller-full-{now.strftime('%Y%m%d-%H%M%S')}.tar.gz"
    mtime = int(now.timestamp())
    db_dump = _dump_database()
    settings_json = json.dumps(_safe_settings_dump(), indent=2, default=str).encode()
    with tarfile.open(fileobj=buf, mode="w:gz") as tar:
        for fname, data in [
            ("db.dump", db_dump),
            ("settings.json", settings_json),
            (
                "README.txt",
                b"ROSzetta - full backup\n"
                b"Restore: pg_restore -d <db> db.dump\n",
            ),
        ]:
            info = tarfile.TarInfo(name=fname)
            info.size = len(data)
            info.mtime = mtime
            tar.addfile(info, io.BytesIO(data))
    return name, buf.getvalue()
def restore_full_archive(data: bytes) -> dict:
    """Restore a full backup: drop schema public, then pg_restore db.dump.

    WARNING: destructive — the current database is replaced entirely.
    Raises RuntimeError for a malformed archive or a failing psql/pg_restore.
    """
    s = get_settings()
    try:
        with tarfile.open(fileobj=io.BytesIO(data), mode="r:gz") as tar:
            try:
                member = tar.getmember("db.dump")
            except KeyError as exc:
                raise RuntimeError("Архив не содержит db.dump (нужен full backup)") from exc
            f = tar.extractfile(member)
            if f is None:
                raise RuntimeError("Не удалось прочитать db.dump из архива")
            dump_bytes = f.read()
    except tarfile.TarError as exc:
        raise RuntimeError(f"Невалидный tar.gz: {exc}") from exc
    # psql/pg_restore expect a plain postgresql:// URL.
    url = s.database_url.replace("postgresql+psycopg2://", "postgresql://")
    logger.warning("controller restore: dropping schema public")
    try:
        subprocess.run(
            ["psql", url, "-v", "ON_ERROR_STOP=1", "-c",
             "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public;"],
            check=True, capture_output=True, timeout=60, env={**os.environ},
        )
    except FileNotFoundError as exc:
        raise RuntimeError("psql not installed in backend image") from exc
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f"psql DROP SCHEMA failed: {exc.stderr.decode(errors='replace')[:400]}") from exc
    logger.warning("controller restore: running pg_restore ({} bytes)", len(dump_bytes))
    try:
        proc = subprocess.run(
            ["pg_restore", "--no-owner", "--no-privileges", "-d", url],
            input=dump_bytes,
            check=True, capture_output=True, timeout=600, env={**os.environ},
        )
    except FileNotFoundError as exc:
        raise RuntimeError("pg_restore not installed in backend image") from exc
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f"pg_restore failed: {exc.stderr.decode(errors='replace')[:400]}") from exc
    return {
        "ok": True,
        "message": "Бэкап успешно развёрнут. Перезайдите в систему — данные обновлены.",
        "stderr": proc.stderr.decode(errors='replace')[:400] if proc.stderr else "",
    }
+60
View File
@@ -0,0 +1,60 @@
from __future__ import annotations
from sqlalchemy.orm import Session
from ..models.alert import Alert
from .settings import get_settings_dict, severity_meets
from . import telegram as tg
# Соответствие категории алерта ключу notify-toggle.
_NOTIFY_KEY_BY_CATEGORY = {
"device": "device_status",
"internet": "internet",
"abnormal_reboot": "abnormal_reboot",
"firmware": "firmware",
}
def add_alert(
    db: Session,
    *,
    title: str,
    severity: str = "info",
    category: str = "system",
    source: str | None = None,
    message: str | None = None,
) -> Alert | None:
    """Create an alert, honouring per-category notification toggles.

    Returns None (and stores nothing) when the category's toggle is
    explicitly off. After committing the row, forwards the alert to
    Telegram when enabled and the severity meets the configured minimum.
    """
    cfg = get_settings_dict(db)
    notify_cfg = cfg.get("notify", {})
    notify_key = _NOTIFY_KEY_BY_CATEGORY.get(category)
    # Only an explicit False disables a category; a missing key means "on".
    if notify_key is not None and notify_cfg.get(notify_key) is False:
        return None
    a = Alert(
        title=title,
        severity=severity,
        category=category,
        source=source,
        message=message,
    )
    db.add(a)
    db.commit()
    db.refresh(a)
    tg_cfg = cfg.get("telegram", {})
    if tg_cfg.get("enabled") and severity_meets(severity, tg_cfg.get("min_severity", "warning")):
        # HTML-formatted Telegram message.
        text = f"<b>[{severity.upper()}] {title}</b>"
        if message:
            text += f"\n{message}"
        if source:
            text += f"\n<i>src: {source}</i>"
        tg.send_message(tg_cfg.get("bot_token", ""), tg_cfg.get("chat_id", ""), text)
    return a
def add_audit(*args, **kwargs) -> None:
    """Compatibility no-op: audit logging was removed; the old entry point
    is kept so existing callers keep working."""
    # All arguments are intentionally ignored.
+103
View File
@@ -0,0 +1,103 @@
"""Сервис проверки новых версий прошивок MikroTik по нескольким каналам."""
from __future__ import annotations
import json
import re
from datetime import datetime, timezone
import httpx
from loguru import logger
from sqlalchemy.orm import Session
from ..models.settings import AppSetting
from .events import add_alert
# Каналы и URL-ы для проверки.
CHANNELS: dict[str, str] = {
"stable": "https://download.mikrotik.com/routeros/NEWESTa7.stable",
"long-term": "https://download.mikrotik.com/routeros/NEWESTa7.long-term",
"testing": "https://download.mikrotik.com/routeros/NEWESTa7.testing",
}
STATE_KEY = "firmware_state"
def _fetch_channel(url: str, timeout: float = 10.0) -> tuple[str, datetime] | None:
    """Fetch one NEWEST* channel file; return (version, release_time) or None."""
    try:
        with httpx.Client(timeout=timeout, follow_redirects=True) as client:
            resp = client.get(url)
            resp.raise_for_status()
            body = resp.text.strip()
    except httpx.HTTPError as exc:
        logger.warning("firmware check: HTTP error for {}: {}", url, exc)
        return None
    parsed = re.match(r"(\S+)\s+(\d+)", body)
    if parsed is None:
        logger.warning("firmware check: unexpected response for {}: {!r}", url, body[:120])
        return None
    version = parsed.group(1)
    released = datetime.fromtimestamp(int(parsed.group(2)), tz=timezone.utc)
    return version, released
def _load_state(db: Session) -> dict:
    """Read the persisted firmware-check state; {} when missing or corrupt."""
    row = db.query(AppSetting).filter(AppSetting.key == STATE_KEY).first()
    if row is None:
        return {}
    try:
        parsed = json.loads(row.value)
    except Exception:
        return {}
    return parsed or {}
def _save_state(db: Session, state: dict) -> None:
    """Upsert the firmware-check state JSON under STATE_KEY and commit."""
    payload = json.dumps(state)
    row = db.query(AppSetting).filter(AppSetting.key == STATE_KEY).first()
    if row is None:
        db.add(AppSetting(key=STATE_KEY, value=payload))
    else:
        row.value = payload
    db.commit()
def get_state(db: Session) -> dict:
    """Per-channel check state: {channel: {version, released_at, last_check}}."""
    return _load_state(db)
def fetch_latest_version(timeout: float = 10.0) -> tuple[str, datetime] | None:
    """Backwards-compat helper: check the stable channel only."""
    return _fetch_channel(CHANNELS["stable"], timeout=timeout)
def check_and_alert(db: Session) -> dict:
    """Check every channel; raise an alert for each newly seen version.

    Returns the updated state dict ({channel: {version, released_at,
    last_check, last_check_ok}}), which is also persisted via _save_state.
    """
    state = _load_state(db)
    now_iso = datetime.now(timezone.utc).isoformat()
    for channel, url in CHANNELS.items():
        res = _fetch_channel(url)
        prev = (state.get(channel) or {}).get("version")
        if res is None:
            # Record the failed attempt anyway so the UI can show it.
            state.setdefault(channel, {})["last_check"] = now_iso
            state[channel]["last_check_ok"] = False
            continue
        version, released_at = res
        state[channel] = {
            "version": version,
            "released_at": released_at.isoformat(),
            "last_check": now_iso,
            "last_check_ok": True,
        }
        # Alert only on a version change; the very first sighting is just logged.
        if prev and prev != version:
            add_alert(
                db,
                severity="info",
                category="firmware",
                source=f"mikrotik.com/{channel}",
                title=f"RouterOS {channel}: новая версия {version}",
                message=f"Предыдущая отслеживаемая: {prev}",
            )
            logger.info("firmware check {}: new version {} (was {})", channel, version, prev)
        elif not prev:
            logger.info("firmware check {}: initial = {}", channel, version)
    _save_state(db, state)
    return state
+177
View File
@@ -0,0 +1,177 @@
"""Создание бэкапа конфигурации MikroTik с PUSH-доставкой на контроллер.
Поток:
1. На устройстве запускается `/system/backup/save name=...` и `/export file=...`.
2. Ждём появления файлов в `/file`.
3. Контроллер открывает в своём встроенном FTP-сервере одноразовую сессию
(уникальный пользователь/пароль, изолированный каталог).
4. На устройстве выполняется `/tool fetch upload=yes mode=ftp ...`,
которое отправляет файлы НА контроллер. На MikroTik не нужно включать
ftp/ssh — нужен только исходящий доступ к контроллеру.
5. Бэкенд читает файлы из каталога сессии, удаляет файлы с устройства,
закрывает FTP-сессию и возвращает байты.
"""
from __future__ import annotations
import time
from dataclasses import dataclass
from typing import Any
from loguru import logger
from .client import RouterOSCredentials, RouterOSError, routeros_session
from ..backup_ftp_server import detect_push_host, get_server, start_server
@dataclass
class BackupFiles:
    """Both backup artifacts pulled from one device: binary + text export."""

    binary_name: str   # "<base>.backup"
    binary_data: bytes
    text_name: str     # "<base>.rsc"
    text_data: bytes
# ---------- helpers вокруг librouteros ----------
def _exec_path(api: Any, *path: str, **params: Any) -> list[dict[str, Any]]:
"""Выполнить RouterOS-команду. Последний сегмент — имя cmd для librouteros.
Пример: _exec_path(api, "system", "backup", "save", name="x")
=> api.path("system", "backup")("save", name="x")
"""
if not path:
raise RouterOSError("_exec_path requires at least one path segment")
*base, cmd = path
p = api.path(*base) if base else api.path()
return list(p(cmd, **params))
def _list_files(api: Any) -> list[dict[str, Any]]:
return list(api.path("file"))
def _wait_file(api: Any, name: str, timeout: float = 15.0) -> dict[str, Any] | None:
    """Poll `/file` until *name* appears with non-zero size; None on timeout."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        found = next(
            (
                row
                for row in _list_files(api)
                if row.get("name") == name and int(row.get("size") or 0) > 0
            ),
            None,
        )
        if found is not None:
            return found
        time.sleep(0.5)
    return None
def _delete_file(api: Any, name: str) -> None:
    """Best-effort removal of *name* from the device's `/file` store.

    Failures are logged, not raised: a leftover file on flash is not worth
    failing the whole backup flow.
    """
    try:
        for row in _list_files(api):
            if row.get("name") == name:
                api.path("file").remove(row[".id"])
                return
    except Exception as exc:  # pragma: no cover
        logger.warning("Could not delete file {} on device: {}", name, exc)
# ---------- главный сценарий ----------
def create_backup_via_push(
    creds: RouterOSCredentials,
    base_name: str,
    push_host: str,
    push_port: int = 2121,
    timeout: float = 90.0,
) -> BackupFiles:
    """Full cycle: create backup+export on the device, wait for its upload.

    Opens a one-off session on the controller's embedded FTP server, has
    the device `/tool fetch upload=yes` both files there, reads them back
    and cleans the files off the device flash. Raises RouterOSError when
    any step fails or the upload times out. The FTP session is always
    closed, even on error.
    """
    binary_name = f"{base_name}.backup"
    text_name = f"{base_name}.rsc"
    server = get_server() or start_server(port=push_port)
    session = server.open_session(expected_files={binary_name, text_name})
    try:
        logger.info(
            "Backup PUSH: device={} base={} push={}:{} user={}",
            creds.host, base_name, push_host, push_port, session.username,
        )
        with routeros_session(creds) as api:
            # 1) binary backup
            try:
                _exec_path(api, "system", "backup", "save", name=base_name)
            except Exception as exc:
                raise RouterOSError(f"backup save failed: {exc}") from exc
            if _wait_file(api, binary_name) is None:
                raise RouterOSError(f"backup file {binary_name} not appeared on device")
            # 2) plain-text export
            try:
                _exec_path(api, "export", file=base_name)
            except Exception as exc:
                raise RouterOSError(f"export failed: {exc}") from exc
            if _wait_file(api, text_name) is None:
                raise RouterOSError(f"export file {text_name} not appeared on device")
            # 3) push both files to the controller
            for fname in (binary_name, text_name):
                try:
                    _exec_path(
                        api, "tool", "fetch",
                        **{
                            "upload": "yes",
                            "mode": "ftp",
                            "address": push_host,
                            "port": str(push_port),
                            "user": session.username,
                            "password": session.password,
                            "src-path": fname,
                            "dst-path": fname,
                        },
                    )
                except Exception as exc:
                    raise RouterOSError(f"push {fname} failed: {exc}") from exc
        # 4) wait until the controller's FTP server has received both files
        try:
            files = server.wait_files(session.session_id, timeout=timeout)
        except TimeoutError as exc:
            raise RouterOSError(str(exc)) from exc
        if binary_name not in files or text_name not in files:
            raise RouterOSError(f"unexpected push contents: got={sorted(files.keys())}")
        # 5) clean up the device flash (best effort, fresh session)
        try:
            with routeros_session(creds) as api:
                _delete_file(api, binary_name)
                _delete_file(api, text_name)
        except Exception as exc:  # pragma: no cover
            logger.warning("Cleanup failed for {}: {}", base_name, exc)
        binary_data = files[binary_name]
        text_data = files[text_name]
        logger.info(
            "Backup PUSH ok: {} binary={}b text={}b",
            base_name, len(binary_data), len(text_data),
        )
        return BackupFiles(
            binary_name=binary_name,
            binary_data=binary_data,
            text_name=text_name,
            text_data=text_data,
        )
    finally:
        try:
            server.close_session(session.session_id)
        except Exception:  # pragma: no cover
            pass
# Обратно-совместимый алиас — используется существующими роутами.
def create_and_download_backup(
    creds: RouterOSCredentials,
    base_name: str,
    push_host: str | None = None,
    push_port: int = 2121,
    **_legacy: Any,
) -> BackupFiles:
    """Backward-compatible wrapper: accepts push_host/port instead of the
    old ssh/ftp_port arguments (extra legacy kwargs are ignored)."""
    host = push_host or detect_push_host()
    return create_backup_via_push(creds, base_name, push_host=host, push_port=push_port)
+283
View File
@@ -0,0 +1,283 @@
"""Тонкий враппер вокруг librouteros для синхронных вызовов из API/воркеров."""
from __future__ import annotations
import socket
import ssl
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Iterator
from librouteros import connect
from librouteros.exceptions import LibRouterosError
from librouteros.login import plain
from loguru import logger
class RouterOSError(RuntimeError):
    """Raised for any RouterOS connection or command failure in this module."""
    pass
@dataclass
class RouterOSCredentials:
    """Connection parameters for one RouterOS device."""

    host: str
    username: str
    password: str
    # Defaults target api-ssl: port 8729 + TLS. Plain api (8728) can be used
    # for legacy devices by passing port=8728, use_tls=False explicitly.
    port: int = 8729
    use_tls: bool = True
    # Connect/read timeout in seconds.
    timeout: float = 5.0
@contextmanager
def routeros_session(creds: RouterOSCredentials) -> Iterator[Any]:
    """Open a librouteros API connection; guaranteed to be closed on exit.

    Yields the connected api object. Raises RouterOSError (wrapping the
    underlying error) when the connection or login fails.
    """
    kwargs: dict[str, Any] = {
        "port": creds.port,
        "timeout": creds.timeout,
        "login_method": plain,
    }
    if creds.use_tls:
        # Certificate verification is disabled: devices typically present
        # self-signed certs, so this gives transport encryption only.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        kwargs["ssl_wrapper"] = ctx.wrap_socket
    try:
        api = connect(
            host=creds.host,
            username=creds.username,
            password=creds.password,
            **kwargs,
        )
        logger.info("RouterOS connected: {}:{} user={}", creds.host, creds.port, creds.username)
    except (LibRouterosError, OSError, socket.timeout) as exc:
        logger.warning(
            "RouterOS connection failed: {}:{} user={} reason={}",
            creds.host, creds.port, creds.username, exc,
        )
        raise RouterOSError(f"connect {creds.host}:{creds.port} failed: {exc}") from exc
    try:
        yield api
    finally:
        try:
            api.close()
        except Exception:  # pragma: no cover
            pass
def fetch_resource(creds: RouterOSCredentials) -> dict[str, Any]:
    """Return the first `/system/resource` record, or {} when none."""
    with routeros_session(creds) as api:
        records = list(api.path("system", "resource"))
        if not records:
            return {}
        return records[0]
def fetch_identity(creds: RouterOSCredentials) -> str | None:
    """Return the device name from `/system/identity`, or None when absent."""
    with routeros_session(creds) as api:
        records = list(api.path("system", "identity"))
        return records[0].get("name") if records else None
def fetch_interfaces(creds: RouterOSCredentials) -> list[dict[str, Any]]:
    """List every row from `/interface`."""
    with routeros_session(creds) as api:
        return [row for row in api.path("interface")]
def cmd_reboot(creds: RouterOSCredentials) -> None:
    """Reboot the device (/system/reboot)."""
    logger.info("Sending reboot to {}:{}", creds.host, creds.port)
    with routeros_session(creds) as api:
        # tuple() forces execution of the lazy command iterator.
        tuple(api.path("system", "reboot"))
def cmd_safe_mode(creds: RouterOSCredentials) -> None:
    """Toggle safe mode (/system/safe-mode, RouterOS 7+).

    Sends the command; the device confirms entering safe mode. If it is
    already in safe mode, the same command exits it.
    """
    logger.info("Toggling safe-mode on {}:{}", creds.host, creds.port)
    with routeros_session(creds) as api:
        # tuple() forces execution of the lazy command iterator.
        tuple(api.path("system", "safe-mode"))
def check_internet(creds: RouterOSCredentials, target: str = "8.8.8.8") -> bool:
    """Check internet reachability from the device via `/ping` (count=2).

    Returns True if at least one echo reply was received; False on zero
    replies or on any connection/command error (best-effort probe that
    never raises).
    """
    try:
        with routeros_session(creds) as api:
            rows = list(api(cmd="/ping", address=target, count="2"))
    # Fixed: the old `except (RouterOSError, Exception)` was redundant —
    # Exception already subsumes RouterOSError. Broad catch is intentional:
    # any failure simply means "no internet".
    except Exception as exc:  # noqa: BLE001
        logger.warning("internet check failed for {}: {}", creds.host, exc)
        return False
    for row in rows:
        # "received" counts echo replies; missing/None counts as zero.
        if int(row.get("received") or 0) > 0:
            return True
    return False
def parse_uptime(uptime: str | None) -> int | None:
"""Парсит RouterOS uptime '1w2d3h4m5s' → секунды."""
if not uptime:
return None
import re
units = {"w": 604800, "d": 86400, "h": 3600, "m": 60, "s": 1}
total = 0
for value, unit in re.findall(r"(\d+)([wdhms])", uptime):
total += int(value) * units[unit]
return total or None
def execute_cli(creds: RouterOSCredentials, command: str) -> list[dict[str, Any]]:
    """Run an arbitrary RouterOS API command given in path style.

    Examples:
        `/system/identity/print`
        `/interface/print`
        `/ip/address/print where interface=ether1`

    Extra parameters follow the path as `name=value` tokens; tokens after a
    literal `where` act as client-side equality filters on the result rows.
    Returns the result rows as a list of dicts.
    """
    tokens = command.strip().split()
    if not tokens:
        raise RouterOSError("empty command")
    path = tokens[0]
    if not path.startswith("/"):
        raise RouterOSError("command must start with '/'")
    params: dict[str, str] = {}
    filters: dict[str, str] = {}
    bucket = params
    for tok in tokens[1:]:
        if tok == "where":
            # Everything after `where` goes into the filter set.
            bucket = filters
        elif "=" in tok:
            key, _, val = tok.partition("=")
            bucket[key] = val
    logger.info("CLI exec on {}: {} args={} where={}", creds.host, path, params, filters)
    try:
        with routeros_session(creds) as api:
            rows = list(api(cmd=path, **params))
    except (LibRouterosError, OSError) as exc:
        raise RouterOSError(f"cli failed: {exc}") from exc
    if filters:
        rows = [row for row in rows
                if all(str(row.get(k)) == v for k, v in filters.items())]
    return rows
# ---------- Sprint 09 helpers ----------
def fetch_interface_stats(creds: RouterOSCredentials) -> list[dict[str, Any]]:
    """List interfaces with rx/tx byte counters and running/disabled flags.

    Returns: [{"name", "rx_bytes", "tx_bytes", "running", "disabled",
               "type", "comment", "mac_address"}].
    Raises RouterOSError on connection/command failure.
    """
    # Hoisted out of the row loop (was redefined on every iteration).
    def _to_int(value: Any) -> int:
        # Counters arrive as strings; missing or malformed values count as 0.
        try:
            return int(value)
        except (TypeError, ValueError):
            return 0

    out: list[dict[str, Any]] = []
    try:
        with routeros_session(creds) as api:
            for r in api.path("interface"):
                out.append({
                    "name": r.get("name"),
                    "rx_bytes": _to_int(r.get("rx-byte")),
                    "tx_bytes": _to_int(r.get("tx-byte")),
                    "running": str(r.get("running", "")).lower() == "true",
                    "disabled": str(r.get("disabled", "")).lower() == "true",
                    "type": r.get("type"),
                    "comment": r.get("comment") or None,
                    "mac_address": r.get("mac-address") or None,
                })
    except (LibRouterosError, OSError) as exc:
        raise RouterOSError(f"interface stats failed: {exc}") from exc
    return out
def fetch_dhcp_leases(creds: RouterOSCredentials) -> list[dict[str, Any]]:
    """Return every DHCP-server lease on the device as a plain dict."""
    leases: list[dict[str, Any]] = []
    try:
        with routeros_session(creds) as api:
            for lease in api.path("ip", "dhcp-server", "lease"):
                leases.append({
                    "address": lease.get("address"),
                    "mac_address": lease.get("mac-address"),
                    # Fall back to the comment when no hostname was reported.
                    "host_name": lease.get("host-name") or lease.get("comment"),
                    "comment": lease.get("comment") or None,
                    "server": lease.get("server"),
                    "status": lease.get("status"),
                    "dynamic": str(lease.get("dynamic", "")).lower() == "true",
                    "blocked": str(lease.get("blocked", "")).lower() == "true",
                    "last_seen": lease.get("last-seen"),
                    "expires_after": lease.get("expires-after"),
                })
    except (LibRouterosError, OSError) as exc:
        raise RouterOSError(f"dhcp leases failed: {exc}") from exc
    return leases
def cmd_upgrade_check(creds: RouterOSCredentials, channel: str = "stable") -> dict[str, Any]:
    """Ask the MikroTik to check for an available update.

    Best-effort: sets the update channel, triggers
    `/system/package/update/check-for-updates`, then returns the first row
    of the current `/system/package/update` state (empty dict if none).
    Raises RouterOSError on connection failure.
    """
    try:
        with routeros_session(creds) as api:
            try:
                # Best-effort: channel setting may fail on some RouterOS
                # versions — ignore and read whatever state is available.
                tuple(api.path("system", "package", "update").call("set",
                                                                   **{"channel": channel}))
            except Exception:
                pass
            try:
                # Best-effort: trigger the check; state is read below regardless.
                tuple(api(cmd="/system/package/update/check-for-updates"))
            except Exception:
                pass
            rows = list(api.path("system", "package", "update"))
            return rows[0] if rows else {}
    except (LibRouterosError, OSError) as exc:
        raise RouterOSError(f"upgrade check failed: {exc}") from exc
def cmd_upgrade_install(creds: RouterOSCredentials) -> None:
    """Start installing the downloaded update (the device will reboot)."""
    try:
        with routeros_session(creds) as api:
            # Drain the reply generator so the command is actually sent.
            for _ in api(cmd="/system/package/update/install"):
                pass
    except (LibRouterosError, OSError) as exc:
        raise RouterOSError(f"upgrade install failed: {exc}") from exc
def push_firmware_via_ftp(
    creds: RouterOSCredentials,
    server: str,
    port: int,
    user: str,
    password: str,
    src_path: str,
    dst_filename: str,
) -> None:
    """Make the device download a file from the controller's FTP server.

    Issues `/tool/fetch` in FTP mode so a firmware image from the local
    repository lands on the device without uploading it through the API.
    """
    url = f"ftp://{server}:{port}/{src_path}"
    fetch_args: dict[str, Any] = {
        "cmd": "/tool/fetch",
        "url": url,
        "user": user,
        "password": password,
        "mode": "ftp",
        # "dst-path" contains a dash, hence the dict form.
        "dst-path": dst_filename,
    }
    try:
        with routeros_session(creds) as api:
            # Drain the reply generator so the command is actually executed.
            for _ in api(**fetch_args):
                pass
    except (LibRouterosError, OSError) as exc:
        raise RouterOSError(f"fetch firmware failed: {exc}") from exc
def cmd_reboot_for_upgrade(creds: RouterOSCredentials) -> None:
    """`/system/reboot` — after the .npk has been pushed, RouterOS installs the update during boot."""
    cmd_reboot(creds)
+89
View File
@@ -0,0 +1,89 @@
"""Глобальные настройки контроллера: хранятся в БД как один JSON-блоб (key='global')."""
from __future__ import annotations
import json
from typing import Any
from sqlalchemy.orm import Session
from ..models.settings import AppSetting
KEY = "global"
# Default values. Existing keys must never be renamed — only new ones added,
# because stored settings are deep-merged over these defaults.
DEFAULTS: dict[str, Any] = {
    # UI branding and localisation
    "ui": {
        "instance_name": "ROSzetta",  # shown in the page header
        "locale": "ru",  # ru | en | uz
        "theme": "mk-dark",  # see frontend theme.ts
        "heartbeat_hours": 6,  # dashboard heartbeat-grid window: 6 | 3 | 1 | 0.5
        "probe_interval_minutes": 5,  # device auto-probe period: 1 | 2 | 3 | 5 | 10
    },
    # Menu item visibility
    "menu": {
        "dashboard": True,
        "devices": True,
        "switches": True,
        "firmware": True,
        "notif_center": True,
        "cli": True,
        "settings": True,
    },
    # Enable/disable alert generation and inclusion in global health
    "notify": {
        "device_status": True,  # up<->down transitions
        "internet": True,  # device has no internet access
        "abnormal_reboot": True,  # abnormal reboot detected
        "firmware": True,  # a new RouterOS version was released
        "style": "jokes",  # GlobalHealth message style: jokes | serious
    },
    # Telegram bot (optional alert delivery)
    "telegram": {
        "enabled": False,
        "bot_token": "",
        "chat_id": "",
        "min_severity": "warning",  # info|warning|error|critical
    },
}
def _merge(base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]:
out = dict(base)
for k, v in override.items():
if isinstance(v, dict) and isinstance(out.get(k), dict):
out[k] = _merge(out[k], v)
else:
out[k] = v
return out
def get_settings_dict(db: Session) -> dict[str, Any]:
    """Return effective settings: the stored JSON blob overlaid on DEFAULTS."""
    row = db.query(AppSetting).filter(AppSetting.key == KEY).first()
    if row is None:
        # Deep copy via JSON round-trip so callers cannot mutate DEFAULTS.
        return json.loads(json.dumps(DEFAULTS))
    try:
        stored = json.loads(row.value)
    except Exception:
        stored = {}
    if not isinstance(stored, dict):
        stored = {}
    return _merge(DEFAULTS, stored)
def update_settings_dict(db: Session, patch: dict[str, Any]) -> dict[str, Any]:
    """Deep-merge *patch* into the stored settings, persist, and return them."""
    merged = _merge(get_settings_dict(db), patch)
    payload = json.dumps(merged)
    row = db.query(AppSetting).filter(AppSetting.key == KEY).first()
    if row is None:
        db.add(AppSetting(key=KEY, value=payload))
    else:
        row.value = payload
    db.commit()
    return merged
_SEVERITY_RANK = {"info": 0, "warning": 1, "error": 2, "critical": 3}


def severity_meets(actual: str, threshold: str) -> bool:
    """Return True when *actual* severity is at or above *threshold*.

    Unknown actual values rank as "info" (0); unknown thresholds default
    to "warning" (1).
    """
    actual_rank = _SEVERITY_RANK.get(actual, 0)
    threshold_rank = _SEVERITY_RANK.get(threshold, 1)
    return actual_rank >= threshold_rank
+31
View File
@@ -0,0 +1,31 @@
"""Опциональная отправка сообщений в Telegram-бот."""
from __future__ import annotations
import httpx
from loguru import logger
def send_message(bot_token: str, chat_id: str, text: str) -> bool:
    """Send *text* to a Telegram chat via the Bot API; return True on success.

    Best-effort: missing credentials, HTTP errors, and non-200 responses
    are logged and reported as False instead of raising.
    """
    if not (bot_token and chat_id):
        return False
    endpoint = f"https://api.telegram.org/bot{bot_token}/sendMessage"
    payload = {
        "chat_id": chat_id,
        "text": text,
        "parse_mode": "HTML",
        "disable_web_page_preview": True,
    }
    try:
        resp = httpx.post(endpoint, json=payload, timeout=8.0)
        if resp.status_code != 200:
            logger.warning("telegram send failed: {} {}", resp.status_code, resp.text[:200])
            return False
        return True
    except Exception as exc:  # noqa: BLE001
        logger.warning("telegram send error: {}", exc)
        return False
def test_credentials(bot_token: str, chat_id: str) -> tuple[bool, str]:
    """Send a test message and return (ok, human-readable status)."""
    if not (bot_token and chat_id):
        return False, "Не заданы bot_token или chat_id"
    sent = send_message(bot_token, chat_id, "<b>ROSzetta</b>\nТестовое сообщение \u2705")
    if sent:
        return True, "OK"
    return False, "Не удалось отправить (см. логи)"
+24
View File
@@ -0,0 +1,24 @@
fastapi==0.115.0
uvicorn[standard]==0.30.6
pydantic[email]==2.9.2
pydantic-settings==2.5.2
email-validator==2.2.0
SQLAlchemy==2.0.35
asyncpg==0.29.0
psycopg2-binary==2.9.9
alembic==1.13.3
passlib[bcrypt]==1.7.4
bcrypt==4.2.0
python-jose[cryptography]==3.3.0
python-multipart==0.0.12
httpx==0.27.2
librouteros==3.4.1
paramiko==3.5.0
pysnmp==6.2.6
redis==5.1.1
celery==5.4.0
boto3==1.35.36
cryptography==43.0.1
loguru==0.7.2
APScheduler==3.10.4
pyftpdlib==1.5.10