2222from google .cloud import monitoring
2323
2424
25-
2625def get_cpu_load ():
2726 """Returns the most recent Cloud Bigtable CPU load measurement.
2827
@@ -51,23 +50,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
5150 bigtable_cluster (str): Cloud Bigtable cluster ID to scale
5251 scale_up (bool): If true, scale up, otherwise scale down
5352 """
54- _MIN_NODE_COUNT = 3
55- """
56- The minimum number of nodes to use. The default minimum is 3. If you have a
57- lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
58- clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
59- metric is useful in figuring out the minimum number of nodes.
60- """
6153
62- _MAX_NODE_COUNT = 30
63- """
64- The maximum number of nodes to use. The default maximum is 30 nodes per zone.
65- If you need more quota, you can request more by following the instructions
66- <a href="https://cloud.google.com/bigtable/quota">here</a>.
67- """
54+ # The minimum number of nodes to use. The default minimum is 3. If you have
55+ # a lot of data, the rule of thumb is to not go below 2.5 TB per node for
56+ # SSD clusters, and 8 TB for HDD. The
57+ # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring
58+ # out the minimum number of nodes.
59+ min_node_count = 3
60+
61+ # The maximum number of nodes to use. The default maximum is 30 nodes per
62+ # zone. If you need more quota, you can request more by following the
63+ # instructions at https://cloud.google.com/bigtable/quota.
64+ max_node_count = 30
65+
66+ # The number of nodes to change the cluster by.
67+ size_change_step = 3
6868
69- _SIZE_CHANGE_STEP = 3
70- """The number of nodes to change the cluster by."""
7169 # [START bigtable_scale]
7270 bigtable_client = bigtable .Client (admin = True )
7371 instance = bigtable_client .instance (bigtable_instance )
@@ -79,16 +77,16 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
7977 current_node_count = cluster .serve_nodes
8078
8179 if scale_up :
82- if current_node_count < _MAX_NODE_COUNT :
83- new_node_count = min (current_node_count + 3 , _MAX_NODE_COUNT )
80+ if current_node_count < max_node_count :
81+ new_node_count = min (current_node_count + 3 , max_node_count )
8482 cluster .serve_nodes = new_node_count
8583 cluster .update ()
8684 print ('Scaled up from {} to {} nodes.' .format (
8785 current_node_count , new_node_count ))
8886 else :
89- if current_node_count > _MIN_NODE_COUNT :
87+ if current_node_count > min_node_count :
9088 new_node_count = max (
91- current_node_count - _SIZE_CHANGE_STEP , _MIN_NODE_COUNT )
89+ current_node_count - size_change_step , min_node_count )
9290 cluster .serve_nodes = new_node_count
9391 cluster .update ()
9492 print ('Scaled down from {} to {} nodes.' .format (
0 commit comments