ClickHouse Cluster Deployment Guide

Cluster Version Overview

ClickHouse clustering works at the table level: a single ClickHouse instance can host both distributed tables and local tables. This document describes the deployment configuration for a four-node ClickHouse cluster.

Distributed table data is split into 2 shards with 2 replicas each, 4 copies of the data in total:

  • Node 1: shard 1, replica 1
  • Node 2: shard 1, replica 2
  • Node 3: shard 2, replica 1
  • Node 4: shard 2, replica 2

Shard 1 and shard 2 together form one complete copy of the data. With this layout, all four nodes can cooperate on a single SQL query, and no data is lost if any one node goes down.
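As a sketch of how tables are typically created on top of this layout (the demo database, the table schema, and the sharding key are hypothetical; the cluster name bpi and the {cluster_shard}/{replica} macros match the configuration shown later in this document):

-- Create the database on every node of the cluster.
CREATE DATABASE IF NOT EXISTS demo ON CLUSTER bpi;

-- Replicated local table: each node stores its shard's copy of the data.
-- ReplicatedMergeTree needs no arguments here because default_replica_path
-- and default_replica_name are set in the server configuration below.
CREATE TABLE demo.events_local ON CLUSTER bpi
(
    event_date Date,
    user_id    UInt64,
    payload    String
)
ENGINE = ReplicatedMergeTree
ORDER BY (event_date, user_id);

-- Distributed table: fans a query out to both shards and routes
-- each insert to a shard by rand().
CREATE TABLE demo.events ON CLUSTER bpi AS demo.events_local
ENGINE = Distributed(bpi, demo, events_local, rand());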

Cluster ClickHouse Deployment

Unlike the standalone version, the nodes of a cluster must be able to discover one another. Compared with a standalone deployment:

  • Information about the other nodes must be configured statically, and ClickHouse Keeper must be set up (a separately deployed ZooKeeper also works)

Taking a four-node cluster of 192.168.1.10, 192.168.1.11, 192.168.1.12, and 192.168.1.13 as an example, configure /etc/clickhouse-server/config.d/config.xml on each node as shown below.

In an actual deployment, replace these four IP addresses in bulk.

192.168.1.10

<?xml version="1.0"?>
<clickhouse>
    <listen_host>0.0.0.0</listen_host>
    <timezone>Asia/Shanghai</timezone>
    <logger>
        <level>information</level>
    </logger>
    <background_pool_size>4</background_pool_size>
    <background_schedule_pool_size>1</background_schedule_pool_size>
    <mark_cache_size>268435456</mark_cache_size>
    <merge_tree>
        <index_granularity>1024</index_granularity>
        <merge_max_block_size>1024</merge_max_block_size>
        <max_bytes_to_merge_at_max_space_in_pool>1610612736</max_bytes_to_merge_at_max_space_in_pool>
        <number_of_free_entries_in_pool_to_lower_max_size_of_merge>1</number_of_free_entries_in_pool_to_lower_max_size_of_merge>
        <number_of_free_entries_in_pool_to_execute_mutation>4</number_of_free_entries_in_pool_to_execute_mutation>
        <number_of_free_entries_in_pool_to_execute_optimize_entire_partition>4</number_of_free_entries_in_pool_to_execute_optimize_entire_partition>
    </merge_tree>
    <query_cache>
        <max_size_in_bytes>134217728</max_size_in_bytes>
        <max_entries>1024</max_entries>
        <max_entry_size_in_bytes>10485760</max_entry_size_in_bytes>
        <max_entry_size_in_rows>8192</max_entry_size_in_rows>
    </query_cache>

    <macros>
        <replica>replica1</replica>
        <cluster_shard>shard1</cluster_shard>
    </macros>

    <default_replica_path>/clickhouse/tables/{cluster_shard}/{database}/{table}</default_replica_path>
    <default_replica_name>{replica}</default_replica_name>
    <remote_servers>
        <!--Cluster name; defaults to bpi-->
        <bpi>
            <secret>econage123</secret>
            <shard>
                <!--Automatic replication within the shard-->
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.10</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
                <replica>
                    <host>192.168.1.11</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.12</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>192.168.1.13</host>
                    <port>9000</port>
                </replica>
            </shard>
        </bpi>
    </remote_servers>
    <zookeeper>
        <node>
            <host>192.168.1.10</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.11</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.12</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.13</host>
            <port>8181</port>
        </node>
    </zookeeper>
    <keeper_server>
        <tcp_port>8181</tcp_port>
        <server_id>1</server_id>
        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>warning</raft_logs_level>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>192.168.1.10</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>192.168.1.11</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>192.168.1.12</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>4</id>
                <hostname>192.168.1.13</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>

</clickhouse>
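The other three nodes use the same file; only the <macros> values and the Keeper <server_id> change from node to node. After each server starts, its macro values can be checked with a quick query (system.macros is a built-in table):

-- On 192.168.1.10 this should return replica1 and shard1.
SELECT macro, substitution FROM system.macros;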

192.168.1.11

<?xml version="1.0"?>
<clickhouse>
    <listen_host>0.0.0.0</listen_host>
    <timezone>Asia/Shanghai</timezone>
    <logger>
        <level>information</level>
    </logger>
    <background_pool_size>4</background_pool_size>
    <background_schedule_pool_size>1</background_schedule_pool_size>
    <mark_cache_size>268435456</mark_cache_size>
    <merge_tree>
        <index_granularity>1024</index_granularity>
        <merge_max_block_size>1024</merge_max_block_size>
        <max_bytes_to_merge_at_max_space_in_pool>1610612736</max_bytes_to_merge_at_max_space_in_pool>
        <number_of_free_entries_in_pool_to_lower_max_size_of_merge>1</number_of_free_entries_in_pool_to_lower_max_size_of_merge>
        <number_of_free_entries_in_pool_to_execute_mutation>4</number_of_free_entries_in_pool_to_execute_mutation>
        <number_of_free_entries_in_pool_to_execute_optimize_entire_partition>4</number_of_free_entries_in_pool_to_execute_optimize_entire_partition>
    </merge_tree>
    <query_cache>
        <max_size_in_bytes>134217728</max_size_in_bytes>
        <max_entries>1024</max_entries>
        <max_entry_size_in_bytes>10485760</max_entry_size_in_bytes>
        <max_entry_size_in_rows>8192</max_entry_size_in_rows>
    </query_cache>

    <macros>
        <replica>replica2</replica>
        <cluster_shard>shard1</cluster_shard>
    </macros>

    <default_replica_path>/clickhouse/tables/{cluster_shard}/{database}/{table}</default_replica_path>
    <default_replica_name>{replica}</default_replica_name>
    <remote_servers>
        <bpi>
            <secret>econage123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.10</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
                <replica>
                    <host>192.168.1.11</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.12</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>192.168.1.13</host>
                    <port>9000</port>
                </replica>
            </shard>
        </bpi>
    </remote_servers>
    <zookeeper>
        <node>
            <host>192.168.1.10</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.11</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.12</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.13</host>
            <port>8181</port>
        </node>
    </zookeeper>
    <keeper_server>
        <tcp_port>8181</tcp_port>
        <server_id>2</server_id>
        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>warning</raft_logs_level>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>192.168.1.10</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>192.168.1.11</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>192.168.1.12</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>4</id>
                <hostname>192.168.1.13</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>

</clickhouse>

192.168.1.12

<?xml version="1.0"?>
<clickhouse>
    <listen_host>0.0.0.0</listen_host>
    <timezone>Asia/Shanghai</timezone>
    <logger>
        <level>information</level>
    </logger>
    <background_pool_size>4</background_pool_size>
    <background_schedule_pool_size>1</background_schedule_pool_size>
    <mark_cache_size>268435456</mark_cache_size>
    <merge_tree>
        <index_granularity>1024</index_granularity>
        <merge_max_block_size>1024</merge_max_block_size>
        <max_bytes_to_merge_at_max_space_in_pool>1610612736</max_bytes_to_merge_at_max_space_in_pool>
        <number_of_free_entries_in_pool_to_lower_max_size_of_merge>1</number_of_free_entries_in_pool_to_lower_max_size_of_merge>
        <number_of_free_entries_in_pool_to_execute_mutation>4</number_of_free_entries_in_pool_to_execute_mutation>
        <number_of_free_entries_in_pool_to_execute_optimize_entire_partition>4</number_of_free_entries_in_pool_to_execute_optimize_entire_partition>
    </merge_tree>
    <query_cache>
        <max_size_in_bytes>134217728</max_size_in_bytes>
        <max_entries>1024</max_entries>
        <max_entry_size_in_bytes>10485760</max_entry_size_in_bytes>
        <max_entry_size_in_rows>8192</max_entry_size_in_rows>
    </query_cache>

    <macros>
        <replica>replica1</replica>
        <cluster_shard>shard2</cluster_shard>
    </macros>

    <default_replica_path>/clickhouse/tables/{cluster_shard}/{database}/{table}</default_replica_path>
    <default_replica_name>{replica}</default_replica_name>
    <remote_servers>
        <bpi>
            <secret>econage123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.10</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
                <replica>
                    <host>192.168.1.11</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.12</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>192.168.1.13</host>
                    <port>9000</port>
                </replica>
            </shard>
        </bpi>
    </remote_servers>
    <zookeeper>
        <node>
            <host>192.168.1.10</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.11</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.12</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.13</host>
            <port>8181</port>
        </node>
    </zookeeper>
    <keeper_server>
        <tcp_port>8181</tcp_port>
        <server_id>3</server_id>
        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>warning</raft_logs_level>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>192.168.1.10</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>192.168.1.11</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>192.168.1.12</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>4</id>
                <hostname>192.168.1.13</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>

</clickhouse>

192.168.1.13

<?xml version="1.0"?>
<clickhouse>
    <listen_host>0.0.0.0</listen_host>
    <timezone>Asia/Shanghai</timezone>
    <logger>
        <level>information</level>
    </logger>
    <background_pool_size>4</background_pool_size>
    <background_schedule_pool_size>1</background_schedule_pool_size>
    <mark_cache_size>268435456</mark_cache_size>
    <merge_tree>
        <index_granularity>1024</index_granularity>
        <merge_max_block_size>1024</merge_max_block_size>
        <max_bytes_to_merge_at_max_space_in_pool>1610612736</max_bytes_to_merge_at_max_space_in_pool>
        <number_of_free_entries_in_pool_to_lower_max_size_of_merge>1</number_of_free_entries_in_pool_to_lower_max_size_of_merge>
        <number_of_free_entries_in_pool_to_execute_mutation>4</number_of_free_entries_in_pool_to_execute_mutation>
        <number_of_free_entries_in_pool_to_execute_optimize_entire_partition>4</number_of_free_entries_in_pool_to_execute_optimize_entire_partition>
    </merge_tree>
    <query_cache>
        <max_size_in_bytes>134217728</max_size_in_bytes>
        <max_entries>1024</max_entries>
        <max_entry_size_in_bytes>10485760</max_entry_size_in_bytes>
        <max_entry_size_in_rows>8192</max_entry_size_in_rows>
    </query_cache>

    <macros>
        <replica>replica2</replica>
        <cluster_shard>shard2</cluster_shard>
    </macros>

    <default_replica_path>/clickhouse/tables/{cluster_shard}/{database}/{table}</default_replica_path>
    <default_replica_name>{replica}</default_replica_name>
    <remote_servers>
        <bpi>
            <secret>econage123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.10</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
                <replica>
                    <host>192.168.1.11</host><!--node host-->
                    <port>9000</port><!--node port-->
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.1.12</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>192.168.1.13</host>
                    <port>9000</port>
                </replica>
            </shard>
        </bpi>
    </remote_servers>
    <zookeeper>
        <node>
            <host>192.168.1.10</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.11</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.12</host>
            <port>8181</port>
        </node>
        <node>
            <host>192.168.1.13</host>
            <port>8181</port>
        </node>
    </zookeeper>
    <keeper_server>
        <tcp_port>8181</tcp_port>
        <server_id>4</server_id>
        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>warning</raft_logs_level>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>192.168.1.10</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>192.168.1.11</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>192.168.1.12</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>4</id>
                <hostname>192.168.1.13</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>

</clickhouse>
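With all four nodes running, the topology can be verified from any node. A minimal sanity check (system.clusters and system.zookeeper are built-in system tables):

-- Should list 2 shards x 2 replicas for the bpi cluster.
SELECT cluster, shard_num, replica_num, host_name, port
FROM system.clusters
WHERE cluster = 'bpi';

-- Succeeds only if the server can reach the Keeper ensemble.
SELECT name FROM system.zookeeper WHERE path = '/';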


Ports

  • 8123 (ClickHouse HTTP)
  • 9000 (ClickHouse TCP)
  • 8181 (ClickHouse Keeper)
  • 9444 (Raft)
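To confirm the ports a running server actually uses, the built-in getServerPort function can be queried from a client session (a sketch; the Keeper and Raft ports are not exposed through this function):

-- Returns the ports configured for the server you are connected to.
SELECT getServerPort('http_port') AS http_port, getServerPort('tcp_port') AS tcp_port;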