Looking good

Jonas Linter
2025-11-18 14:49:44 +01:00
parent 34bff6a12a
commit b826277b54
8 changed files with 393 additions and 22 deletions

View File

@@ -336,7 +336,7 @@ async def lifespan(app: FastAPI):
# via run_migrations.py or `uv run alembic upgrade head`
if is_primary:
_LOGGER.info("Running startup tasks (primary worker)...")
- await run_startup_tasks(AsyncSessionLocal, config)
+ await run_startup_tasks(AsyncSessionLocal, config, engine)
_LOGGER.info("Startup tasks completed")
else:
_LOGGER.info("Skipping startup tasks (non-primary worker)")

View File

@@ -658,7 +658,7 @@ class ConversionService:
rate_plan_code=rate_plan_code,
connected_room_type=connected_room_type,
daily_sales=daily_sales_list if daily_sales_list else None,
- total_revenue=str(total_revenue) if total_revenue > 0 else None,
+ total_revenue=total_revenue if total_revenue > 0 else None,
created_at=datetime.now(),
updated_at=datetime.now(),
)
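One implication of dropping the str() call here (paired with the Double column change in the next file): IEEE-754 doubles cannot represent every decimal amount exactly, which is what the old string column guarded against. A quick illustration:

>>> 0.1 + 0.2
0.30000000000000004
>>> from decimal import Decimal
>>> Decimal("0.1") + Decimal("0.2")
Decimal('0.3')

If exact cent-level totals matter downstream, a Numeric column mapped to Decimal would keep both precision and queryability; Double trades exactness for simpler aggregation.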

View File

@@ -10,6 +10,7 @@ from sqlalchemy import (
Column,
Date,
DateTime,
Double,
ForeignKey,
Integer,
String,
@@ -459,6 +460,8 @@ class Conversion(Base):
guest_last_name = Column(String, index=True) # lastName from guest element
guest_email = Column(String, index=True) # email from guest element
guest_country_code = Column(String) # countryCode from guest element
guest_birth_date = Column(Date) # birthDate from guest element
guest_id = Column(String) # id from guest element
# Advertising/tracking data - used for matching to existing reservations
advertising_medium = Column(
@@ -527,7 +530,7 @@ class ConversionRoom(Base):
# Extracted total revenue for efficient querying (sum of all revenue_total in daily_sales)
# Kept as string to preserve decimal precision
- total_revenue = Column(String, nullable=True)
+ total_revenue = Column(Double, nullable=True)
# Metadata
created_at = Column(DateTime(timezone=True)) # When this record was imported
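Since schema changes in this repo are managed via Alembic (see the deprecation notice in the last file), the String-to-Double switch implies a migration roughly like the sketch below. This is assumed, not part of the diff: the conversion_rooms table name is inferred from the ConversionRoom model, and the postgresql_using cast presumes a PostgreSQL backend:

import sqlalchemy as sa
from alembic import op

def upgrade() -> None:
    # Cast existing decimal strings to double precision in place (PostgreSQL).
    op.alter_column(
        "conversion_rooms",
        "total_revenue",
        existing_type=sa.String(),
        type_=sa.Double(),
        existing_nullable=True,
        postgresql_using="total_revenue::double precision",
    )

def downgrade() -> None:
    op.alter_column(
        "conversion_rooms",
        "total_revenue",
        existing_type=sa.Double(),
        type_=sa.String(),
        existing_nullable=True,
    )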

View File

@@ -9,8 +9,10 @@ before the application starts accepting requests. It includes:
import asyncio
from typing import Any
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncEngine, async_sessionmaker
from .const import CONF_GOOGLE_ACCOUNT, CONF_HOTEL_ID, CONF_META_ACCOUNT
from .customer_service import CustomerService
from .db import create_database_engine
from .logging_config import get_logger
@@ -62,8 +64,182 @@ async def setup_database(config: dict[str, Any] | None = None) -> tuple[AsyncEng
raise
async def backfill_advertising_account_ids(
engine: AsyncEngine, config: dict[str, Any]
) -> None:
"""Backfill advertising account IDs for existing reservations.
Updates existing reservations to populate meta_account_id and google_account_id
based on the conditional logic:
- If fbclid is present, set meta_account_id from hotel config
- If gclid is present, set google_account_id from hotel config
This is a startup task that runs after schema migrations to ensure
existing data is consistent with config.
Args:
engine: SQLAlchemy async engine
config: Application configuration dict
"""
_LOGGER.info("Backfilling advertising account IDs for existing reservations...")
# Build a mapping of hotel_id -> account IDs from config
hotel_accounts = {}
alpine_bits_auth = config.get("alpine_bits_auth", [])
for hotel in alpine_bits_auth:
hotel_id = hotel.get(CONF_HOTEL_ID)
meta_account = hotel.get(CONF_META_ACCOUNT)
google_account = hotel.get(CONF_GOOGLE_ACCOUNT)
if hotel_id:
hotel_accounts[hotel_id] = {
"meta_account": meta_account,
"google_account": google_account,
}
if not hotel_accounts:
_LOGGER.debug("No hotel accounts found in config, skipping backfill")
return
_LOGGER.info("Found %d hotel(s) with account configurations", len(hotel_accounts))
# Update reservations with meta_account_id where fbclid is present
meta_updated = 0
for hotel_id, accounts in hotel_accounts.items():
if accounts["meta_account"]:
async with engine.begin() as conn:
sql = text(
"UPDATE reservations "
"SET meta_account_id = :meta_account "
"WHERE hotel_code = :hotel_id "
"AND fbclid IS NOT NULL "
"AND fbclid != '' "
"AND (meta_account_id IS NULL OR meta_account_id = '')"
)
result = await conn.execute(
sql,
{"meta_account": accounts["meta_account"], "hotel_id": hotel_id},
)
count = result.rowcount
if count > 0:
_LOGGER.info(
"Updated %d reservations with meta_account_id for hotel %s",
count,
hotel_id,
)
meta_updated += count
# Update reservations with google_account_id where gclid is present
google_updated = 0
for hotel_id, accounts in hotel_accounts.items():
if accounts["google_account"]:
async with engine.begin() as conn:
sql = text(
"UPDATE reservations "
"SET google_account_id = :google_account "
"WHERE hotel_code = :hotel_id "
"AND gclid IS NOT NULL "
"AND gclid != '' "
"AND (google_account_id IS NULL OR google_account_id = '')"
)
result = await conn.execute(
sql,
{
"google_account": accounts["google_account"],
"hotel_id": hotel_id,
},
)
count = result.rowcount
if count > 0:
_LOGGER.info(
"Updated %d reservations with google_account_id for hotel %s",
count,
hotel_id,
)
google_updated += count
if meta_updated > 0 or google_updated > 0:
_LOGGER.info(
"Backfill complete: %d reservations updated with meta_account_id, "
"%d with google_account_id",
meta_updated,
google_updated,
)
async def backfill_acked_requests_username(
engine: AsyncEngine, config: dict[str, Any]
) -> None:
"""Backfill username for existing acked_requests records.
For each acknowledgement, find the corresponding reservation to determine
its hotel_code, then look up the username for that hotel in the config
and update the acked_request record.
This is a startup task that runs after schema migrations to ensure
existing data is consistent with config.
Args:
engine: SQLAlchemy async engine
config: Application configuration dict
"""
_LOGGER.info("Backfilling usernames for existing acked_requests...")
# Build a mapping of hotel_id -> username from config
hotel_usernames = {}
alpine_bits_auth = config.get("alpine_bits_auth", [])
for hotel in alpine_bits_auth:
hotel_id = hotel.get(CONF_HOTEL_ID)
username = hotel.get("username")
if hotel_id and username:
hotel_usernames[hotel_id] = username
if not hotel_usernames:
_LOGGER.debug("No hotel usernames found in config, skipping backfill")
return
_LOGGER.info("Found %d hotel(s) with usernames in config", len(hotel_usernames))
# Update acked_requests with usernames by matching to reservations
total_updated = 0
async with engine.begin() as conn:
for hotel_id, username in hotel_usernames.items():
sql = text(
"""
UPDATE acked_requests
SET username = :username
WHERE unique_id IN (
SELECT md5_unique_id FROM reservations WHERE hotel_code = :hotel_id
)
AND username IS NULL
"""
)
result = await conn.execute(
sql, {"username": username, "hotel_id": hotel_id}
)
count = result.rowcount
if count > 0:
_LOGGER.info(
"Updated %d acknowledgements with username for hotel %s",
count,
hotel_id,
)
total_updated += count
if total_updated > 0:
_LOGGER.info(
"Backfill complete: %d acknowledgements updated with username",
total_updated,
)
async def run_startup_tasks(
- sessionmaker: async_sessionmaker, config: dict[str, Any] | None = None
+ sessionmaker: async_sessionmaker,
+ config: dict[str, Any] | None = None,
+ engine: AsyncEngine | None = None,
) -> None:
"""Run one-time startup tasks.
@@ -73,6 +249,7 @@ async def run_startup_tasks(
Args:
sessionmaker: SQLAlchemy async sessionmaker
config: Application configuration dictionary
engine: SQLAlchemy async engine (optional, for backfill tasks)
"""
# Hash any existing customers that don't have hashed data
async with sessionmaker() as session:
@@ -83,4 +260,15 @@ async def run_startup_tasks(
"Backfilled hashed data for %d existing customers", hashed_count
)
else:
_LOGGER.info("All existing customers already have hashed data")
_LOGGER.debug("All existing customers already have hashed data")
# Backfill advertising account IDs and usernames based on config
# This ensures existing data is consistent with current configuration
if config and engine:
await backfill_advertising_account_ids(engine, config)
await backfill_acked_requests_username(engine, config)
elif config and not engine:
_LOGGER.warning(
"No engine provided to run_startup_tasks, "
"skipping config-based backfill tasks"
)
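Both backfill tasks build their hotel mappings from the same alpine_bits_auth section. The config shape they assume looks roughly like this sketch (all values invented; the literal key strings behind CONF_HOTEL_ID, CONF_META_ACCOUNT, and CONF_GOOGLE_ACCOUNT are guesses, while "username" is read literally in the code above):

# Illustrative config consumed by the backfill tasks (values invented).
config = {
    "alpine_bits_auth": [
        {
            "hotel_id": "HOTEL_001",           # CONF_HOTEL_ID (key name assumed)
            "username": "hotel_001_user",      # matched into acked_requests
            "meta_account": "act_12345",       # CONF_META_ACCOUNT (key name assumed)
            "google_account": "123-456-7890",  # CONF_GOOGLE_ACCOUNT (key name assumed)
        },
    ],
}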

View File

@@ -1,7 +1,24 @@
"""Database migrations for AlpineBits.
"""DEPRECATED: Legacy database migrations for AlpineBits.
This module contains migration functions that are automatically run at app startup
to update existing database schemas without losing data.
⚠️ WARNING: This module is deprecated and no longer used. ⚠️
SCHEMA MIGRATIONS are now handled by Alembic (see alembic/versions/).
STARTUP TASKS (data backfills) are now in db_setup.py.
Migration History:
- migrate_add_room_types: Schema migration (should be in Alembic)
- migrate_add_advertising_account_ids: Schema + backfill (split into Alembic + db_setup.py)
- migrate_add_username_to_acked_requests: Schema + backfill (split into Alembic + db_setup.py)
- migrate_normalize_conversions: Schema migration (should be in Alembic)
Current Status:
- All schema changes are now managed via Alembic migrations
- All data backfills are now in db_setup.py as startup tasks
- This file is kept for reference but is no longer executed
Do not add new migrations here. Instead:
1. For schema changes: Create Alembic migration with `uv run alembic revision --autogenerate -m "description"`
2. For data backfills: Add to db_setup.py as a startup task
"""
from typing import Any
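For reference, the autogenerate command in step 1 produces a revision module along these lines (a generic Alembic skeleton, not from this repository; revision IDs and the example column are invented):

"""add example_field to reservations

Revision ID: abc123def456
Revises: 0123456789ab
"""
import sqlalchemy as sa
from alembic import op

revision = "abc123def456"
down_revision = "0123456789ab"
branch_labels = None
depends_on = None

def upgrade() -> None:
    # Autogenerate fills this in by diffing the models against the live schema.
    op.add_column("reservations", sa.Column("example_field", sa.String(), nullable=True))

def downgrade() -> None:
    op.drop_column("reservations", "example_field")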