Added support for version 23 (ansible-collections#93)
* Added support for version 23

* Deleted the tests for version 22

* Added a detailed changelog
aleksvagachev authored Jan 17, 2025
1 parent 82b67ad commit 4bbd6c5
Showing 4 changed files with 82 additions and 43 deletions.
1 change: 0 additions & 1 deletion .github/workflows/ansible-test-plugins.yml
@@ -105,7 +105,6 @@ jobs:
- stable-2.18
- devel
clickhouse:
- 22.8.9.24
- 23.8.9.54
- 24.8.6.70
python:
7 changes: 7 additions & 0 deletions changelogs/fragments/0-clickhouse_info.yml
@@ -0,0 +1,7 @@
major_changes:
- clickhouse_info - removed support for clickhouse versions 21 and 22 (https://github.com/ansible-collections/community.clickhouse/pull/93).

minor_changes:
- clickhouse_info - columns are now extracted from the ClickHouse version 23 system tables;
  the affected system tables are databases, clusters, tables, dictionaries, settings,
  merge_tree_settings, users, and settings_profile_elements
  (https://github.com/ansible-collections/community.clickhouse/pull/93).
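The minor_changes entry above names the system tables whose column sets the module now reads. A quick, standalone way to see which columns a particular server actually exposes for those tables is to query system.columns directly. The sketch below is illustrative only (it is not part of this commit) and assumes the clickhouse_driver package — the same driver the collection relies on — plus a locally reachable server with default credentials.

from clickhouse_driver import Client  # assumption: installed via `pip install clickhouse-driver`

client = Client(host="localhost")  # assumption: local server, default port and user

tables = ("databases", "clusters", "tables", "dictionaries", "settings",
          "merge_tree_settings", "users", "settings_profile_elements")

for table in tables:
    # system.columns lists every column of every table, including the system tables themselves
    rows = client.execute(
        "SELECT name FROM system.columns WHERE database = 'system' AND table = %(table)s",
        {"table": table},
    )
    print(table, [name for (name,) in rows])

Running this against a 22.x and a 23.x server side by side shows the columns (for example slowdowns_count in system.clusters) that the updated queries below depend on.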
111 changes: 72 additions & 39 deletions plugins/modules/clickhouse_info.py
@@ -189,7 +189,8 @@ def get_databases(module, client):
Returns a dictionary with database names as keys.
"""
query = "SELECT name, engine, data_path, uuid FROM system.databases"
query = ("SELECT name, engine, data_path, metadata_path, uuid, "
"engine_full, comment FROM system.databases")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -200,7 +201,10 @@ def get_databases(module, client):
db_info[row[0]] = {
"engine": row[1],
"data_path": row[2],
"uuid": str(row[3]),
"metadata_path": row[3],
"uuid": str(row[4]),
"engine_full": row[5],
"comment": row[6],
}

return db_info
@@ -213,7 +217,7 @@ def get_clusters(module, client):
"""
query = ("SELECT cluster, shard_num, shard_weight, replica_num, host_name, "
"host_address, port, is_local, user, default_database, errors_count, "
"estimated_recovery_time FROM system.clusters")
"slowdowns_count, estimated_recovery_time FROM system.clusters")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -233,7 +237,8 @@ def get_clusters(module, client):
user = row[8]
default_database = row[9]
errors_count = row[10]
- estimated_recovery_time = row[11]
+ slowdowns_count = row[11]
+ estimated_recovery_time = row[12]

# Add cluster if not already there
if cluster not in cluster_info:
@@ -256,6 +261,7 @@ def get_clusters(module, client):
"user": user,
"default_database": default_database,
"errors_count": errors_count,
"slowdowns_count": slowdowns_count,
"estimated_recovery_time": estimated_recovery_time,
}

@@ -293,9 +299,11 @@ def get_tables(module, client):
"""
query = ("SELECT database, name, uuid, engine, is_temporary, data_paths, "
"metadata_path, metadata_modification_time, dependencies_database, "
"dependencies_table, create_table_query, engine_full, partition_key, "
"dependencies_table, create_table_query, engine_full, as_select, partition_key, "
"sorting_key, primary_key, sampling_key, storage_policy, total_rows, total_bytes, "
"lifetime_rows, lifetime_bytes FROM system.tables")
"parts, active_parts, total_marks, lifetime_rows, lifetime_bytes, comment, "
"has_own_data, loading_dependencies_database, loading_dependencies_table, "
"loading_dependent_database, loading_dependent_table FROM system.tables")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -316,15 +324,25 @@ def get_tables(module, client):
"dependencies_table": row[9],
"create_table_query": row[10],
"engine_full": row[11],
"partition_key": row[12],
"sorting_key": row[13],
"primary_key": row[14],
"sampling_key": row[15],
"storage_policy": row[16],
"total_rows": row[17],
"total_bytes": row[18],
"lifetime_rows": row[19],
"lifetime_bytes": row[20],
"as_select": row[12],
"partition_key": row[13],
"sorting_key": row[14],
"primary_key": row[15],
"sampling_key": row[16],
"storage_policy": row[17],
"total_rows": row[18],
"total_bytes": row[19],
"parts": row[20],
"active_parts": row[21],
"total_marks": row[22],
"lifetime_rows": row[23],
"lifetime_bytes": row[24],
"comment": row[25],
"has_own_data": row[26],
"loading_dependencies_database": row[27],
"loading_dependencies_table": row[28],
"loading_dependent_database": row[29],
"loading_dependent_table": row[30],
}

return tables_info
@@ -336,11 +354,12 @@ def get_dictionaries(module, client):
Returns a dictionary keyed by database name;
within each database, the 'dictionary' names are the keys.
"""
query = ("SELECT database, name, uuid, status, origin, type, key, "
"attribute.names, attribute.types, bytes_allocated, query_count, "
"hit_rate, element_count, load_factor, source, lifetime_min, "
query = ("SELECT database, name, uuid, status, origin, key.names, key.types, "
"attribute.names, attribute.types, bytes_allocated, "
"hierarchical_index_bytes_allocated, query_count, hit_rate, found_rate, "
"element_count, load_factor, source, lifetime_min, "
"lifetime_max, loading_start_time, last_successful_update_time, "
"loading_duration, last_exception FROM system.dictionaries")
"loading_duration, last_exception, comment FROM system.dictionaries")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -355,22 +374,24 @@ def get_dictionaries(module, client):
"uuid": str(row[2]),
"status": row[3],
"origin": row[4],
"type": row[5],
"key": row[6],
"key.names": row[5],
"key.types": row[6],
"attribute.names": row[7],
"attribute.types": row[8],
"bytes_allocated": row[9],
"query_count": row[10],
"hit_rate": row[11],
"element_count": row[12],
"load_factor": row[13],
"source": row[14],
"lifetime_min": row[15],
"lifetime_max": row[16],
"loading_start_time": row[17],
"last_successful_update_time": row[18],
"loading_duration": row[19],
"last_exception": row[20],
"hierarchical_index_bytes_allocated": row[10],
"query_count": row[11],
"hit_rate": row[12],
"found_rate": row[13],
"element_count": row[14],
"load_factor": row[15],
"source": row[16],
"lifetime_min": row[17],
"lifetime_max": row[18],
"loading_start_time": row[19],
"last_successful_update_time": row[20],
"loading_duration": row[21],
"last_exception": row[22],
}

return dictionaries_info
@@ -382,7 +403,7 @@ def get_settings(module, client):
Returns a dictionary with settings names as keys.
"""
query = ("SELECT name, value, changed, description, min, max, readonly, "
"type FROM system.settings")
"type, default, alias_for FROM system.settings")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -398,6 +419,8 @@ def get_settings(module, client):
"max": row[5],
"readonly": row[6],
"type": row[7],
"default": row[8],
"alias_for": row[9],
}

return settings_info
@@ -408,8 +431,8 @@ def get_merge_tree_settings(module, client):
Returns a dictionary with merge_tree_settings names as keys.
"""
query = ("SELECT name, value, changed, description, "
"type FROM system.merge_tree_settings")
query = ("SELECT name, value, changed, description, min, max, "
"readonly, type FROM system.merge_tree_settings")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -421,7 +444,10 @@ def get_merge_tree_settings(module, client):
"value": row[1],
"changed": row[2],
"description": row[3],
"type": row[4],
"min": row[4],
"max": row[5],
"readonly": row[6],
"type": row[7],
}

return merge_tree_settings_info
@@ -434,7 +460,8 @@ def get_users(module, client):
"""
query = ("SELECT name, id, storage, auth_type, auth_params, host_ip, host_names, "
"host_names_regexp, host_names_like, default_roles_all, "
"default_roles_list, default_roles_except FROM system.users")
"default_roles_list, default_roles_except, grantees_any, "
"grantees_list, grantees_except, default_database FROM system.users")
result = execute_query(module, client, query)

if result == PRIV_ERR_CODE:
@@ -455,6 +482,11 @@ def get_users(module, client):
"default_roles_all": row[9],
"default_roles_list": row[10],
"default_roles_except": row[11],
"grantees_any": row[12],
"grantees_list": row[13],
"grantees_except": row[14],
"default_database": row[15],

}

user_info[user_name]["roles"] = get_user_roles(module, client, user_name)
@@ -585,7 +617,7 @@ def get_settings_profile_elements(module, client):
Returns a dictionary with roles, profiles and users names as keys.
"""
query = ("SELECT profile_name, user_name, role_name, "
"index, setting_name, value, min, max, "
"index, setting_name, value, min, max, writability, "
"inherit_profile FROM system.settings_profile_elements")
result = execute_query(module, client, query)

@@ -620,7 +652,8 @@ def get_settings_profile_elements(module, client):
"value": row[5],
"min": row[6],
"max": row[7],
"inherit_profile": row[8],
"writability": row[8],
"inherit_profile": row[9],
})

return settings_profile_elements
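Outside of Ansible, the row-to-dict pattern used throughout this module can be reproduced directly with the driver. The sketch below is only an illustration (not part of the commit) of the updated get_databases() query, assuming clickhouse_driver is installed and a server is reachable on localhost with default credentials.

from clickhouse_driver import Client  # assumption: same driver the module uses, installed locally

client = Client(host="localhost")  # assumption: local server, default credentials

# The version-23 column set added to get_databases() above
query = ("SELECT name, engine, data_path, metadata_path, uuid, "
         "engine_full, comment FROM system.databases")

db_info = {}
for row in client.execute(query):  # each row is a tuple in SELECT column order
    db_info[row[0]] = {
        "engine": row[1],
        "data_path": row[2],
        "metadata_path": row[3],
        "uuid": str(row[4]),
        "engine_full": row[5],
        "comment": row[6],
    }

print(db_info)

Against a pre-23 server the extra columns are missing and a query like this would be expected to fail, which matches the major_changes note about dropping support for versions 21 and 22.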
6 changes: 3 additions & 3 deletions tests/integration/targets/clickhouse_info/tasks/initial.yml
@@ -64,9 +64,9 @@
- result["settings_profile_elements"]["users"]["eva"][0]["setting_name"] == "max_memory_usage"
- result["settings_profile_elements"]["users"]["eva"][0]["value"] == "15000"

- - name: Debug
- ansible.builtin.debug:
- var: result
+ # - name: Debug
+ # ansible.builtin.debug:
+ # var: result

- name: Create user
community.clickhouse.clickhouse_client:
