# migrations/versions/008_add_eval_tables.py
 1  from alembic import op
 2  import sqlalchemy as sa
 3  
# revision identifiers, used by Alembic.
revision = '008'            # this migration's id
down_revision = '007'       # parent revision; downgrading returns the DB to this state
branch_labels = None        # no named branch
depends_on = None           # no cross-branch dependency
 9  
def upgrade():
    """Create the four evaluation tables: datasets, test cases, runs, results.

    Each table is created only if it does not already exist, which keeps the
    migration safe to re-run against a partially-migrated database.  Unlike
    the previous try/print approach, any genuine DDL error now propagates and
    aborts the migration, so Alembic never records this revision as applied
    over a half-created schema.
    """
    bind = op.get_bind()
    existing = set(sa.inspect(bind).get_table_names())

    if 'eval_datasets' not in existing:
        op.create_table(
            'eval_datasets',
            sa.Column('id', sa.Integer(), primary_key=True, index=True),
            sa.Column('name', sa.String(255)),
            sa.Column('description', sa.Text(), nullable=True),
            sa.Column('project_id', sa.Integer(), sa.ForeignKey('projects.id'), nullable=False),
            sa.Column('created_at', sa.DateTime()),
            sa.Column('updated_at', sa.DateTime()),
        )

    if 'eval_test_cases' not in existing:
        op.create_table(
            'eval_test_cases',
            sa.Column('id', sa.Integer(), primary_key=True, index=True),
            sa.Column('dataset_id', sa.Integer(), sa.ForeignKey('eval_datasets.id'), nullable=False),
            sa.Column('question', sa.Text(), nullable=False),
            sa.Column('expected_answer', sa.Text(), nullable=True),
            sa.Column('context', sa.Text(), nullable=True),
            sa.Column('created_at', sa.DateTime()),
        )

    if 'eval_runs' not in existing:
        op.create_table(
            'eval_runs',
            sa.Column('id', sa.Integer(), primary_key=True, index=True),
            sa.Column('dataset_id', sa.Integer(), sa.ForeignKey('eval_datasets.id'), nullable=False),
            sa.Column('project_id', sa.Integer(), sa.ForeignKey('projects.id'), nullable=False),
            # server_default (not default=) so the DB itself fills 'pending';
            # a Python-side default= has no effect in a migration-created table.
            sa.Column('status', sa.String(50), server_default='pending'),
            sa.Column('metrics', sa.Text()),
            sa.Column('summary', sa.Text(), nullable=True),
            sa.Column('started_at', sa.DateTime(), nullable=True),
            sa.Column('completed_at', sa.DateTime(), nullable=True),
            sa.Column('created_at', sa.DateTime()),
            sa.Column('error', sa.Text(), nullable=True),
        )

    if 'eval_results' not in existing:
        op.create_table(
            'eval_results',
            sa.Column('id', sa.Integer(), primary_key=True, index=True),
            sa.Column('run_id', sa.Integer(), sa.ForeignKey('eval_runs.id'), nullable=False),
            sa.Column('test_case_id', sa.Integer(), sa.ForeignKey('eval_test_cases.id'), nullable=False),
            sa.Column('actual_answer', sa.Text(), nullable=True),
            sa.Column('retrieval_context', sa.Text(), nullable=True),
            sa.Column('metric_name', sa.String(255)),
            sa.Column('score', sa.Float()),
            sa.Column('reason', sa.Text(), nullable=True),
            # server_default for the same reason as eval_runs.status above.
            sa.Column('passed', sa.Boolean(), server_default=sa.false()),
            sa.Column('latency_ms', sa.Integer(), nullable=True),
        )
70  
71  
def downgrade():
    """Drop the evaluation tables in reverse dependency order.

    Tables that are already absent are skipped (safe re-run); any real DDL
    error propagates instead of being printed and ignored, so a failed
    downgrade is visible to Alembic rather than silently half-applied.
    """
    bind = op.get_bind()
    existing = set(sa.inspect(bind).get_table_names())

    # Children first so foreign-key constraints never block the drop.
    for table in ('eval_results', 'eval_runs', 'eval_test_cases', 'eval_datasets'):
        if table in existing:
            op.drop_table(table)