from typing import Optional, Type

import sqlalchemy
from sqlmodel import SQLModel
from sqlmodel.sql.sqltypes import AutoString

from timescaledb import exceptions


def validate_compress_segmentby_field(
    model: Type[SQLModel], segmentby_field: Optional[str] = None
) -> bool:
    """
    Verify that the specified field is a valid segmentby field.
    Valid types include String, Integer, Boolean, and other scalar types;
    arrays and JSON types are not supported for segmentby.

    segmentby is the column list on which to key the compressed segments.
    An identifier representing the source of the data, such as device_id
    or tags_id, is usually a good candidate. The default is no segmentby
    columns.
    """
    if segmentby_field is None:
        return True
    column = model.__table__.columns.get(segmentby_field)
    if column is None:
        raise exceptions.InvalidSegmentByField(
            f"Field '{segmentby_field}' not found in model {model.__name__}"
        )

    # Types that are valid for segmentby
    valid_types = (
        AutoString,
        sqlalchemy.String,
        sqlalchemy.Integer,
        sqlalchemy.SmallInteger,
        sqlalchemy.BigInteger,
        sqlalchemy.Boolean,
        sqlalchemy.Date,
        sqlalchemy.DateTime,
        sqlalchemy.Enum,
        sqlalchemy.Float,
        sqlalchemy.Numeric,
    )

    column_type = type(column.type)
    if not issubclass(column_type, valid_types):
        raise exceptions.InvalidSegmentByField(
            f"Field '{segmentby_field}' in model {model.__name__} has invalid type {column_type.__name__}. "
            f"Must be one of: {', '.join(t.__name__ for t in valid_types)}"
        )

    return True
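
# Usage sketch for the validator above. `SensorReading` is a hypothetical
# model used only for illustration (a runnable definition appears at the
# bottom of this module):
#
#     validate_compress_segmentby_field(SensorReading, "device_id")  # -> True
#     validate_compress_segmentby_field(SensorReading, "unknown")
#     # -> raises exceptions.InvalidSegmentByField

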
def validate_compress_orderby_field(
    model: Type[SQLModel], orderby_field: Optional[str] = None
) -> bool:
    """
    Validate the order used by compression, specified in the same way as the
    ORDER BY clause in a SELECT query. The default is the descending order
    of the hypertable's time column.

    orderby_field format:
        '<column_name> [ASC | DESC] [NULLS {FIRST | LAST}] [, ...]'
    """
    if orderby_field is None:
        return True
    # Split on commas to handle multiple orderby fields
    for field_spec in orderby_field.split(","):
        field_spec = field_spec.strip()
        orderby_parts = field_spec.split()

        if not orderby_parts:
            raise exceptions.InvalidOrderByField("Empty orderby field specification")

        orderby_column_name = orderby_parts[0]
        column = model.__table__.columns.get(orderby_column_name)
        if column is None:
            raise exceptions.InvalidOrderByField(
                f"Field '{orderby_column_name}' not found in model {model.__name__}"
            )

        # Types that are not valid for orderby
        invalid_types = (
            sqlalchemy.JSON,
            sqlalchemy.ARRAY,
            sqlalchemy.PickleType,
        )

        column_type = type(column.type)
        if issubclass(column_type, invalid_types):
            raise exceptions.InvalidOrderByField(
                f"Field '{orderby_column_name}' in model {model.__name__} has invalid type {column_type.__name__}. "
                "JSON, ARRAY, and PickleType are not supported for orderby fields"
            )

        # Validate the optional direction and NULLS modifiers. Both are
        # optional in the format above, so 'col NULLS FIRST' without an
        # explicit direction is also accepted.
        modifiers = [part.upper() for part in orderby_parts[1:]]
        if modifiers and modifiers[0] in ("ASC", "DESC"):
            modifiers = modifiers[1:]
        if modifiers and modifiers not in (["NULLS", "FIRST"], ["NULLS", "LAST"]):
            raise exceptions.InvalidOrderByField(
                f"Invalid specification '{field_spec}'. Expected format: "
                "'<column_name> [ASC | DESC] [NULLS {FIRST | LAST}]'"
            )

    return True
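
# Usage sketch, again with the hypothetical `SensorReading` model:
#
#     validate_compress_orderby_field(SensorReading, "time DESC")         # -> True
#     validate_compress_orderby_field(SensorReading, "time NULLS FIRST")  # -> True
#     validate_compress_orderby_field(SensorReading, "time SIDEWAYS")
#     # -> raises exceptions.InvalidOrderByField

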
def validate_unique_segmentby_and_orderby_fields(
    model: Type[SQLModel],
    segmentby_field: Optional[str] = None,
    orderby_field: Optional[str] = None,
) -> bool:
    """
    Validate that the segmentby field does not also appear as an orderby column.
    """
    if segmentby_field is None or orderby_field is None:
        return True
    # Compare against every comma-separated orderby column, not just the first
    for field_spec in orderby_field.split(","):
        orderby_parts = field_spec.strip().split()
        if orderby_parts and orderby_parts[0] == segmentby_field:
            raise exceptions.InvalidCompressionFields(
                "Segmentby and orderby fields must be different"
            )
    return True
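

# A minimal runnable sketch of the validators above, assuming this module
# lives inside the timescaledb package. `SensorReading` is a hypothetical
# model used only for illustration.
if __name__ == "__main__":
    from datetime import datetime

    from sqlmodel import Field

    class SensorReading(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        time: datetime
        device_id: str
        value: float

    assert validate_compress_segmentby_field(SensorReading, "device_id")
    assert validate_compress_orderby_field(
        SensorReading, "time DESC, value ASC NULLS LAST"
    )
    assert validate_unique_segmentby_and_orderby_fields(
        SensorReading, segmentby_field="device_id", orderby_field="time DESC"
    )
    print("compression validator checks passed")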