Skip to content

Commit 85648d0

Browse files
committed
delete more tests
1 parent 1bae973 commit 85648d0

File tree

2 files changed

+1
-56
lines changed

2 files changed

+1
-56
lines changed

tests/datasets/storages/test_storage_factory.py

-2
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,6 @@
1313
StorageKey.DISCOVER,
1414
StorageKey.ERRORS,
1515
StorageKey.ERRORS_RO,
16-
StorageKey.GROUPEDMESSAGES,
17-
StorageKey.GROUPASSIGNEES,
1816
StorageKey.METRICS_COUNTERS,
1917
StorageKey.ORG_METRICS_COUNTERS,
2018
StorageKey.METRICS_DISTRIBUTIONS,

tests/migrations/test_runner_individual.py

+1-54
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from snuba.consumers.types import KafkaMessageMetadata
1212
from snuba.datasets.storages.factory import get_writable_storage
1313
from snuba.datasets.storages.storage_key import StorageKey
14-
from snuba.migrations.groups import MigrationGroup, get_group_loader
14+
from snuba.migrations.groups import MigrationGroup
1515
from snuba.migrations.runner import MigrationKey, Runner
1616
from snuba.migrations.status import Status
1717
from snuba.processor import InsertBatch
@@ -146,59 +146,6 @@ def generate_transactions() -> None:
146146
).write(rows)
147147

148148

149-
@pytest.mark.clickhouse_db
150-
def test_groupedmessages_compatibility() -> None:
151-
cluster = get_cluster(StorageSetKey.EVENTS)
152-
153-
# Ignore the multi node mode because this tests a migration
154-
# for an older table state that only applied to single node
155-
if not cluster.is_single_node():
156-
return
157-
158-
database = cluster.get_database()
159-
connection = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)
160-
161-
# Create old style table without project ID
162-
connection.execute(
163-
"""
164-
CREATE TABLE groupedmessage_local (`offset` UInt64, `record_deleted` UInt8,
165-
`id` UInt64, `status` Nullable(UInt8), `last_seen` Nullable(DateTime),
166-
`first_seen` Nullable(DateTime), `active_at` Nullable(DateTime),
167-
`first_release_id` Nullable(UInt64)) ENGINE = ReplacingMergeTree(offset)
168-
ORDER BY id SAMPLE BY id SETTINGS index_granularity = 8192
169-
"""
170-
)
171-
172-
migration_id = "0010_groupedmessages_onpremise_compatibility"
173-
174-
runner = Runner()
175-
runner.run_migration(
176-
MigrationKey(MigrationGroup.SYSTEM, "0001_migrations"), force=True
177-
)
178-
events_migrations = get_group_loader(MigrationGroup.EVENTS).get_migrations()
179-
180-
# Mark prior migrations complete
181-
for migration in events_migrations[: (events_migrations.index(migration_id))]:
182-
runner._update_migration_status(
183-
MigrationKey(MigrationGroup.EVENTS, migration), Status.COMPLETED
184-
)
185-
186-
runner.run_migration(
187-
MigrationKey(MigrationGroup.EVENTS, migration_id),
188-
force=True,
189-
)
190-
191-
outcome = perform_select_query(
192-
["primary_key"],
193-
"system.tables",
194-
{"name": "groupedmessage_local", "database": str(database)},
195-
None,
196-
connection,
197-
)
198-
199-
assert outcome == [("project_id, id",)]
200-
201-
202149
@pytest.mark.clickhouse_db
203150
def run_prior_migrations(
204151
migration_group: MigrationGroup, stop_migration_id: str, runner: Runner

0 commit comments

Comments (0)