/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.ipc.QosPriority;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.zookeeper.KeeperException;

/**
 * Implements the master RPC services.
 */
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class MasterRpcServices extends RSRpcServices
    implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
    LockService.BlockingInterface {
  private static final Log LOG = LogFactory.getLog(MasterRpcServices.class.getName());

  private final HMaster master;

  /**
   * @return Subset of configuration to pass initializing regionservers: e.g.
   *     the filesystem to use and root directory to use.
   */
  private RegionServerStartupResponse.Builder createConfigurationSubset() {
    RegionServerStartupResponse.Builder resp = addConfig(
      RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
    resp = addConfig(resp, "fs.defaultFS");
    return addConfig(resp, "hbase.master.info.port");
  }

  private RegionServerStartupResponse.Builder addConfig(
      final RegionServerStartupResponse.Builder resp, final String key) {
    NameStringPair.Builder entry = NameStringPair.newBuilder()
      .setName(key)
      .setValue(master.getConfiguration().get(key));
    resp.addMapEntries(entry.build());
    return resp;
  }

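  // Note on createConfigurationSubset()/addConfig() above: the master ships a small subset of its
  // own configuration back to a starting region server (the HBase root directory, fs.defaultFS and
  // the master info port) as name/value map entries in the RegionServerStartupResponse.
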
  public MasterRpcServices(HMaster m) throws IOException {
    super(m);
    master = m;
  }

  protected PriorityFunction createPriority() {
    return new MasterAnnotationReadingPriorityFunction(this);
  }

  enum BalanceSwitchMode {
    SYNC,
    ASYNC
  }

  /**
   * Assigns balancer switch according to BalanceSwitchMode.
   * @param b new balancer switch
   * @param mode BalanceSwitchMode
   * @return old balancer switch
   */
  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
    boolean oldValue = master.loadBalancerTracker.isBalancerOn();
    boolean newValue = b;
    try {
      if (master.cpHost != null) {
        newValue = master.cpHost.preBalanceSwitch(newValue);
      }
      try {
        if (mode == BalanceSwitchMode.SYNC) {
          synchronized (master.getLoadBalancer()) {
            master.loadBalancerTracker.setBalancerOn(newValue);
          }
        } else {
          master.loadBalancerTracker.setBalancerOn(newValue);
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
      if (master.cpHost != null) {
        master.cpHost.postBalanceSwitch(oldValue, newValue);
      }
    } catch (IOException ioe) {
      LOG.warn("Error flipping balance switch", ioe);
    }
    return oldValue;
  }

  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
    return switchBalancer(b, BalanceSwitchMode.SYNC);
  }

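  // Note on switchBalancer() above: in SYNC mode the new flag is persisted while holding the load
  // balancer lock, so a balance run in progress observes the change before continuing; otherwise
  // the flag is simply written to the balancer tracker.
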
  /**
   * Sets normalizer on/off flag in ZK.
   */
  public boolean normalizerSwitch(boolean on) {
    boolean oldValue = master.getRegionNormalizerTracker().isNormalizerOn();
    boolean newValue = on;
    try {
      try {
        master.getRegionNormalizerTracker().setNormalizerOn(newValue);
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      LOG.info(master.getClientIdAuditPrefix() + " set normalizerSwitch=" + newValue);
    } catch (IOException ioe) {
      LOG.warn("Error flipping normalizer switch", ioe);
    }
    return oldValue;
  }

  /**
   * @return list of blocking services and their security info classes that this server supports
   */
  protected List<BlockingServiceAndInterface> getServices() {
    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
    bssi.add(new BlockingServiceAndInterface(
      MasterService.newReflectiveBlockingService(this),
      MasterService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(
      RegionServerStatusService.newReflectiveBlockingService(this),
      RegionServerStatusService.BlockingInterface.class));
    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
      LockService.BlockingInterface.class));
    bssi.addAll(super.getServices());
    return bssi;
  }

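  // Note on getServices() above: besides the region server services inherited from RSRpcServices,
  // the master additionally exposes the MasterService, RegionServerStatusService and LockService
  // blocking interfaces on the same RPC server.
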
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
      GetLastFlushedSequenceIdRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    byte[] encodedRegionName = request.getRegionName().toByteArray();
    RegionStoreSequenceIds ids = master.getServerManager()
      .getLastFlushedSequenceId(encodedRegionName);
    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids);
  }

  public RegionServerReportResponse regionServerReport(
      RpcController controller, RegionServerReportRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      ClusterStatusProtos.ServerLoad sl = request.getLoad();
      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
      ServerLoad oldLoad = master.getServerManager().getLoad(serverName);
      ServerLoad newLoad = new ServerLoad(sl);
      master.getServerManager().regionServerReport(serverName, newLoad);
      int version = VersionInfoUtil.getCurrentClientVersionNumber();
      master.getAssignmentManager().reportOnlineRegions(serverName,
        version, newLoad.getRegionsLoad().keySet());
      if (sl != null && master.metricsMaster != null) {
        master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
          - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
  }

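  // Note on regionServerReport() above: the request-count metric is incremented by the delta
  // between the newly reported total and the previously recorded total for that server, so
  // repeated reports do not double-count requests.
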
  public RegionServerStartupResponse regionServerStartup(
      RpcController controller, RegionServerStartupRequest request) throws ServiceException {
    // Register with server manager
    try {
      master.checkServiceStarted();
      InetAddress ia = master.getRemoteInetAddress(
        request.getPort(), request.getServerStartCode());
      // if regionserver passed hostname to use,
      // then use it instead of doing a reverse DNS lookup
      ServerName rs = master.getServerManager().regionServerStartup(request, ia);

      // Send back some config info
      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
      NameStringPair.Builder entry = NameStringPair.newBuilder()
        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)
        .setValue(rs.getHostname());
      resp.addMapEntries(entry.build());
      return resp.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

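  // Note on regionServerStartup() above: the response carries back the hostname the master sees
  // for the region server (KEY_FOR_HOSTNAME_SEEN_BY_MASTER) together with the configuration
  // subset produced by createConfigurationSubset().
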
  public ReportRSFatalErrorResponse reportRSFatalError(
      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
    String errorText = request.getErrorMessage();
    ServerName sn = ProtobufUtil.toServerName(request.getServer());
    String msg = "Region server " + sn
      + " reported a fatal error:\n" + errorText;
    master.rsFatals.add(msg);
    return ReportRSFatalErrorResponse.newBuilder().build();
  }

  public AddColumnResponse addColumn(RpcController controller,
      AddColumnRequest req) throws ServiceException {
    try {
      long procId = master.addColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
        req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed in the server, so do not set any procId
        return AddColumnResponse.newBuilder().build();
      } else {
        return AddColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public AssignRegionResponse assignRegion(RpcController controller,
      AssignRegionRequest req) throws ServiceException {
    try {
      master.checkInitialized();

      final RegionSpecifierType type = req.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
          + " actual: " + type);
      }

      final byte[] regionName = req.getRegion().getValue().toByteArray();
      final HRegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName);
      if (regionInfo == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));

      final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
      if (master.cpHost != null) {
        if (master.cpHost.preAssign(regionInfo)) {
          return arr;
        }
      }
      LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString());
      master.getAssignmentManager().assign(regionInfo, true);
      if (master.cpHost != null) {
        master.cpHost.postAssign(regionInfo);
      }
      return arr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public BalanceResponse balance(RpcController controller,
      BalanceRequest request) throws ServiceException {
    try {
      return BalanceResponse.newBuilder().setBalancerRan(master.balance(
        request.hasForce() ? request.getForce() : false)).build();
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  public CreateNamespaceResponse createNamespace(RpcController controller,
      CreateNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.createNamespace(
        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
        request.getNonceGroup(),
        request.getNonce());
      return CreateNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
      throws ServiceException {
    TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
    byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
      long procId =
        master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
      return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public DeleteColumnResponse deleteColumn(RpcController controller,
      DeleteColumnRequest req) throws ServiceException {
    try {
      long procId = master.deleteColumn(
        ProtobufUtil.toTableName(req.getTableName()),
        req.getColumnName().toByteArray(),
        req.getNonceGroup(),
        req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed in the server, so do not set any procId
        return DeleteColumnResponse.newBuilder().build();
      } else {
        return DeleteColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
      DeleteNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.deleteNamespace(
        request.getNamespaceName(),
        request.getNonceGroup(),
        request.getNonce());
      return DeleteNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Execute Delete Snapshot operation.
   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
   *     deleted properly.
   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
   *     exist.
   */
  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
      DeleteSnapshotRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      master.snapshotManager.checkSnapshotSupport();

      LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
      master.snapshotManager.deleteSnapshot(request.getSnapshot());
      return DeleteSnapshotResponse.newBuilder().build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public DeleteTableResponse deleteTable(RpcController controller,
      DeleteTableRequest request) throws ServiceException {
    try {
      long procId = master.deleteTable(ProtobufUtil.toTableName(
        request.getTableName()), request.getNonceGroup(), request.getNonce());
      return DeleteTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request)
      throws ServiceException {
    try {
      long procId = master.truncateTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getPreserveSplits(),
        request.getNonceGroup(),
        request.getNonce());
      return TruncateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public DisableTableResponse disableTable(RpcController controller,
      DisableTableRequest request) throws ServiceException {
    try {
      long procId = master.disableTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(),
        request.getNonce());
      return DisableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
      EnableCatalogJanitorRequest req) throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return EnableCatalogJanitorResponse.newBuilder().setPrevValue(
      master.catalogJanitorChore.setEnabled(req.getEnable())).build();
  }

  public SetCleanerChoreRunningResponse setCleanerChoreRunning(RpcController c,
      SetCleanerChoreRunningRequest req)
      throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    boolean prevValue =
      master.getLogCleaner().getEnabled() && master.getHFileCleaner().getEnabled();
    master.getLogCleaner().setEnabled(req.getOn());
    master.getHFileCleaner().setEnabled(req.getOn());
    return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build();
  }

  public EnableTableResponse enableTable(RpcController controller,
      EnableTableRequest request) throws ServiceException {
    try {
      long procId = master.enableTable(
        ProtobufUtil.toTableName(request.getTableName()),
        request.getNonceGroup(),
        request.getNonce());
      return EnableTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  public MergeTableRegionsResponse mergeTableRegions(
      RpcController c, MergeTableRegionsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    RegionStates regionStates = master.getAssignmentManager().getRegionStates();

    assert(request.getRegionCount() == 2);
    HRegionInfo[] regionsToMerge = new HRegionInfo[request.getRegionCount()];
    for (int i = 0; i < request.getRegionCount(); i++) {
      final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
      if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
        LOG.warn("MergeRegions specifier type: expected: "
          + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region " + i + " ="
          + request.getRegion(i).getType());
      }
      RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion));
      if (regionState == null) {
        throw new ServiceException(
          new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion)));
      }
      regionsToMerge[i] = regionState.getRegion();
    }

    try {
      long procId = master.mergeRegions(
        regionsToMerge,
        request.getForcible(),
        request.getNonceGroup(),
        request.getNonce());
      return MergeTableRegionsResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

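  // Note on mergeTableRegions() above: the request is expected to name exactly two regions by
  // their encoded names; a different specifier type is only logged as a warning, while unknown
  // regions are rejected before the merge procedure is submitted.
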
  public SplitTableRegionResponse splitRegion(final RpcController controller,
      final SplitTableRegionRequest request) throws ServiceException {
    try {
      long procId = master.splitRegion(
        HRegionInfo.convert(request.getRegionInfo()),
        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null,
        request.getNonceGroup(),
        request.getNonce());
      return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
      final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ServerRpcController execController = new ServerRpcController();

      ClientProtos.CoprocessorServiceCall call = request.getCall();
      String serviceName = call.getServiceName();
      String methodName = call.getMethodName();
      if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
        throw new UnknownProtocolException(null,
          "No registered Master Coprocessor Endpoint found for " + serviceName +
          ". Has it been enabled?");
      }

      com.google.protobuf.Service service = master.coprocessorServiceHandlers.get(serviceName);
      com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc = service.getDescriptorForType();
      com.google.protobuf.Descriptors.MethodDescriptor methodDesc =
        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);

      com.google.protobuf.Message execRequest =
        CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
      final com.google.protobuf.Message.Builder responseBuilder =
        service.getResponsePrototype(methodDesc).newBuilderForType();
      service.callMethod(methodDesc, execController, execRequest,
        new com.google.protobuf.RpcCallback<com.google.protobuf.Message>() {
          public void run(com.google.protobuf.Message message) {
            if (message != null) {
              responseBuilder.mergeFrom(message);
            }
          }
        });
      com.google.protobuf.Message execResult = responseBuilder.build();
      if (execController.getFailedOn() != null) {
        throw execController.getFailedOn();
      }
      return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

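  // Note on execMasterService() above: coprocessor endpoint calls are dispatched by looking up the
  // registered service by name, decoding the request payload for the named method, invoking it
  // through a blocking callback, and rethrowing any failure recorded on the controller.
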
  /**
   * Triggers an asynchronous attempt to run a distributed procedure.
   */
  public ExecProcedureResponse execProcedure(RpcController controller,
      ExecProcedureRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
        desc.getSignature());
      if (mpm == null) {
        throw new ServiceException(new DoNotRetryIOException("The procedure is not registered: "
          + desc.getSignature()));
      }

      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: "
        + desc.getSignature());

      mpm.execProcedure(desc);

      // Send back the max amount of time the client should wait for the procedure to complete
      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
      return ExecProcedureResponse.newBuilder().setExpectedTimeout(
        waitTime).build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

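  // Note on execProcedure() above: the expectedTimeout returned to the client is the default
  // maximum wait time used for snapshots (SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); the
  // client is expected to poll for completion within that window.
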
  /**
   * Triggers a synchronous attempt to run a distributed procedure and sets
   * return data in response.
   */
  public ExecProcedureResponse execProcedureWithRet(RpcController controller,
      ExecProcedureRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
        desc.getSignature());
      if (mpm == null) {
        throw new ServiceException("The procedure is not registered: "
          + desc.getSignature());
      }

      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: "
        + desc.getSignature());

      byte[] data = mpm.execProcedureWithRet(desc);

      ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
      // set return data if available
      if (data != null) {
        builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public GetClusterStatusResponse getClusterStatus(RpcController controller,
      GetClusterStatusRequest req) throws ServiceException {
    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
    try {
      master.checkInitialized();
      response.setClusterStatus(ProtobufUtil.convert(
        master.getClusterStatus(ProtobufUtil.toOptions(req.getClusterOptions()))));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /**
   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
   */
  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
      GetCompletedSnapshotsRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
      List<SnapshotDescription> snapshots = master.snapshotManager.getCompletedSnapshots();

      // convert to protobuf
      for (SnapshotDescription snapshot : snapshots) {
        builder.addSnapshots(snapshot);
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public GetNamespaceDescriptorResponse getNamespaceDescriptor(
      RpcController controller, GetNamespaceDescriptorRequest request)
      throws ServiceException {
    try {
      return GetNamespaceDescriptorResponse.newBuilder()
        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(
          master.getNamespace(request.getNamespaceName())))
        .build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Get the number of regions of the table that have been updated by the alter.
   *
   * @return Pair indicating the progress of the alter: Pair.getFirst is the number of
   *     regions that are yet to be updated, Pair.getSecond is the total number
   *     of regions of the table
   * @throws ServiceException
   */
  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
      RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException {
    // TODO: currently, we query using the table name on the client side. this
    // may overlap with other table operations or the table operation may
    // have completed before querying this API. We need to refactor to a
    // transaction system in the future to avoid these ambiguities.
    TableName tableName = ProtobufUtil.toTableName(req.getTableName());

    try {
      master.checkInitialized();
      Pair<Integer, Integer> pair = master.getAssignmentManager().getReopenStatus(tableName);
      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
      ret.setYetToUpdateRegions(pair.getFirst());
      ret.setTotalRegions(pair.getSecond());
      return ret.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Get list of TableDescriptors for requested tables.
   * @param c Unused (set to null).
   * @param req GetTableDescriptorsRequest that contains:
   *     - tableNames: requested tables, or if empty, all are requested
   * @return GetTableDescriptorsResponse
   * @throws ServiceException
   */
  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
      GetTableDescriptorsRequest req) throws ServiceException {
    try {
      master.checkInitialized();

      final String regex = req.hasRegex() ? req.getRegex() : null;
      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
      List<TableName> tableNameList = null;
      if (req.getTableNamesCount() > 0) {
        tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
        for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
          tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
        }
      }

      List<TableDescriptor> descriptors = master.listTableDescriptors(namespace, regex,
        tableNameList, req.getIncludeSysTables());

      GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
      if (descriptors != null && descriptors.size() > 0) {
        // Add the table descriptors to the response
        for (TableDescriptor htd : descriptors) {
          builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
        }
      }
      return builder.build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  /**
   * Get list of userspace table names.
   * @param controller Unused (set to null).
   * @param req GetTableNamesRequest
   * @return GetTableNamesResponse
   * @throws ServiceException
   */
  public GetTableNamesResponse getTableNames(RpcController controller,
      GetTableNamesRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();

      final String regex = req.hasRegex() ? req.getRegex() : null;
      final String namespace = req.hasNamespace() ? req.getNamespace() : null;
      List<TableName> tableNames = master.listTableNames(namespace, regex,
        req.getIncludeSysTables());

      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
      if (tableNames != null && tableNames.size() > 0) {
        // Add the table names to the response
        for (TableName table : tableNames) {
          builder.addTableNames(ProtobufUtil.toProtoTableName(table));
        }
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public GetTableStateResponse getTableState(RpcController controller,
      GetTableStateRequest request) throws ServiceException {
    try {
      master.checkServiceStarted();
      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
      TableState.State state = master.getTableStateManager()
        .getTableState(tableName);
      GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
      builder.setTableState(new TableState(tableName, state).convert());
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
      IsCatalogJanitorEnabledRequest req) throws ServiceException {
    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(
      master.isCatalogJanitorEnabled()).build();
  }

  public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c,
      IsCleanerChoreEnabledRequest req)
      throws ServiceException {
    return IsCleanerChoreEnabledResponse.newBuilder().setValue(master.isCleanerChoreEnabled())
        .build();
  }

  public IsMasterRunningResponse isMasterRunning(RpcController c,
      IsMasterRunningRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();
      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(
        !master.isStopped()).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Checks if the specified procedure is done.
   * @return true if the procedure is done, false if the procedure is in the process of completing
   * @throws ServiceException if invalid procedure or failed procedure with progress failure reason.
   */
  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
      IsProcedureDoneRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      ProcedureDescription desc = request.getProcedure();
      MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager(
        desc.getSignature());
      if (mpm == null) {
        throw new ServiceException("The procedure is not registered: "
          + desc.getSignature());
      }
      LOG.debug("Checking to see if procedure from request:"
        + desc.getSignature() + " is done");

      IsProcedureDoneResponse.Builder builder =
        IsProcedureDoneResponse.newBuilder();
      boolean done = mpm.isProcedureDone(desc);
      builder.setDone(done);
      return builder.build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Checks if the specified snapshot is done.
   * @return true if the snapshot is in file system ready to use,
   *     false if the snapshot is in the process of completing
   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or
   *     a wrapped HBaseSnapshotException with progress failure reason.
   */
  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
      IsSnapshotDoneRequest request) throws ServiceException {
    LOG.debug("Checking to see if snapshot from request:" +
      ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
    try {
      master.checkInitialized();
      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
      boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot());
      builder.setDone(done);
      return builder.build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public GetProcedureResultResponse getProcedureResult(RpcController controller,
      GetProcedureResultRequest request) throws ServiceException {
    LOG.debug("Checking to see if procedure is done pid=" + request.getProcId());
    try {
      master.checkInitialized();
      GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
      Procedure<?> result = master.getMasterProcedureExecutor()
        .getResultOrProcedure(request.getProcId());
      if (result == null) {
        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
      } else {
        boolean remove = false;
        if (result.isFinished() || result.isFailed()) {
          builder.setState(GetProcedureResultResponse.State.FINISHED);
          remove = true;
        } else {
          builder.setState(GetProcedureResultResponse.State.RUNNING);
        }
        builder.setSubmittedTime(result.getSubmittedTime());
        builder.setLastUpdate(result.getLastUpdate());
        if (result.isFailed()) {
          IOException exception = result.getException().unwrapRemoteIOException();
          builder.setException(ForeignExceptionUtil.toProtoForeignException(exception));
        }
        byte[] resultData = result.getResult();
        if (resultData != null) {
          builder.setResult(UnsafeByteOperations.unsafeWrap(resultData));
        }
        if (remove) {
          master.getMasterProcedureExecutor().removeResult(request.getProcId());
        }
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

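  // Note on getProcedureResult() above: once a finished (or failed) procedure's result has been
  // served, it appears to be evicted from the procedure executor's completed-result cache via
  // removeResult(), so a subsequent call for the same pid may report NOT_FOUND.
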
  public AbortProcedureResponse abortProcedure(
      RpcController rpcController,
      AbortProcedureRequest request) throws ServiceException {
    try {
      AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
      boolean abortResult =
        master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
      response.setIsProcedureAborted(abortResult);
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
      ListNamespaceDescriptorsRequest request) throws ServiceException {
    try {
      ListNamespaceDescriptorsResponse.Builder response =
        ListNamespaceDescriptorsResponse.newBuilder();
      for (NamespaceDescriptor ns : master.getNamespaces()) {
        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
      }
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public GetProceduresResponse getProcedures(
      RpcController rpcController,
      GetProceduresRequest request) throws ServiceException {
    try {
      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
      for (Procedure<?> p : master.getProcedures()) {
        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
      }
      return response.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public GetLocksResponse getLocks(
      RpcController controller,
      GetLocksRequest request) throws ServiceException {
    try {
      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
      for (LockedResource lockedResource : master.getLocks()) {
        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
      }
      return builder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
      ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
    try {
      ListTableDescriptorsByNamespaceResponse.Builder b =
        ListTableDescriptorsByNamespaceResponse.newBuilder();
      for (TableDescriptor htd : master
          .listTableDescriptorsByNamespace(request.getNamespaceName())) {
        b.addTableSchema(ProtobufUtil.toTableSchema(htd));
      }
      return b.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }


  @Override
  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
      ListTableNamesByNamespaceRequest request) throws ServiceException {
    try {
      ListTableNamesByNamespaceResponse.Builder b =
          ListTableNamesByNamespaceResponse.newBuilder();
      for (TableName tableName : master.listTableNamesByNamespace(request.getNamespaceName())) {
        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
      }
      return b.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public ModifyColumnResponse modifyColumn(RpcController controller,
      ModifyColumnRequest req) throws ServiceException {
    try {
      long procId = master.modifyColumn(
          ProtobufUtil.toTableName(req.getTableName()),
          ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
          req.getNonceGroup(),
          req.getNonce());
      if (procId == -1) {
        // This means the operation was not performed on the server, so do not set any procId
        return ModifyColumnResponse.newBuilder().build();
      } else {
        return ModifyColumnResponse.newBuilder().setProcId(procId).build();
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
      ModifyNamespaceRequest request) throws ServiceException {
    try {
      long procId = master.modifyNamespace(
          ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
          request.getNonceGroup(),
          request.getNonce());
      return ModifyNamespaceResponse.newBuilder().setProcId(procId).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public ModifyTableResponse modifyTable(RpcController controller,
      ModifyTableRequest req) throws ServiceException {
    try {
      long procId = master.modifyTable(
          ProtobufUtil.toTableName(req.getTableName()),
          ProtobufUtil.toTableDescriptor(req.getTableSchema()),
          req.getNonceGroup(),
          req.getNonce());
      return ModifyTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public MoveRegionResponse moveRegion(RpcController controller,
      MoveRegionRequest req) throws ServiceException {
    final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
    RegionSpecifierType type = req.getRegion().getType();
    final byte[] destServerName = (req.hasDestServerName()) ?
        Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName()) : null;
    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();

    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
          + " actual: " + type);
    }

    try {
      master.checkInitialized();
      master.move(encodedRegionName, destServerName);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }

    return mrr;
  }

  /**
   * Offline specified region from master's in-memory state. It will not attempt to
   * reassign the region as in unassign.
   *
   * <p>This is a special method that should be used by experts or hbck.
   */
  @Override
  public OfflineRegionResponse offlineRegion(RpcController controller,
      OfflineRegionRequest request) throws ServiceException {
    try {
      master.checkInitialized();

      final RegionSpecifierType type = request.getRegion().getType();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("offlineRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
            + " actual: " + type);
      }

      final byte[] regionName = request.getRegion().getValue().toByteArray();
      final HRegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName);
      if (hri == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));

      if (master.cpHost != null) {
        master.cpHost.preRegionOffline(hri);
      }
      LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
      master.getAssignmentManager().offlineRegion(hri);
      if (master.cpHost != null) {
        master.cpHost.postRegionOffline(hri);
      }
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
    return OfflineRegionResponse.newBuilder().build();
  }

  /**
   * Execute Restore/Clone snapshot operation.
   *
   * <p>If the specified table exists, a "Restore" is executed, replacing the table
   * schema and directory data with the content of the snapshot.
   * The table must be disabled, or an UnsupportedOperationException will be thrown.
   *
   * <p>If the table doesn't exist, a "Clone" is executed: a new table is created
   * using the schema at the time of the snapshot, and the content of the snapshot.
   *
   * <p>The restore/clone operation does not require copying HFiles. Since HFiles
   * are immutable, the table can point to and use the same files as the original one.
   */
  @Override
  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
      RestoreSnapshotRequest request) throws ServiceException {
    try {
      long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
          request.getNonce(), request.getRestoreACL());
      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }
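
  // Assumed client flow (a sketch, not defined by this class): the proc id returned above is
  // typically fed to getProcedureResult and polled until State.FINISHED before the restored
  // or cloned table is used.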

  @Override
  public RunCatalogScanResponse runCatalogScan(RpcController c,
      RunCatalogScanRequest req) throws ServiceException {
    try {
      master.checkInitialized();
      return ResponseConverter.buildRunCatalogScanResponse(master.catalogJanitorChore.scan());
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req)
      throws ServiceException {
    try {
      master.checkInitialized();
      boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
      return ResponseConverter.buildRunCleanerChoreResponse(result);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
      SetBalancerRunningRequest req) throws ServiceException {
    try {
      master.checkInitialized();
      boolean prevValue = (req.getSynchronous()) ?
          synchronousBalanceSwitch(req.getOn()) : master.balanceSwitch(req.getOn());
      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ShutdownResponse shutdown(RpcController controller,
      ShutdownRequest request) throws ServiceException {
    LOG.info(master.getClientIdAuditPrefix() + " shutdown");
    try {
      master.shutdown();
    } catch (IOException e) {
      LOG.error("Exception occurred in HMaster.shutdown()", e);
      throw new ServiceException(e);
    }
    return ShutdownResponse.newBuilder().build();
  }

  /**
   * Triggers an asynchronous attempt to take a snapshot.
   */
  @Override
  public SnapshotResponse snapshot(RpcController controller,
      SnapshotRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      master.snapshotManager.checkSnapshotSupport();

      LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:" +
          ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
      // get the snapshot information
      SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(
          request.getSnapshot(), master.getConfiguration());
      master.snapshotManager.takeSnapshot(snapshot);

      // send back the max amount of time the client should wait for the snapshot to complete
      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(),
          snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
      return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
    } catch (ForeignException e) {
      throw new ServiceException(e.getCause());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }
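
  // The expectedTimeout returned above is only an upper-bound hint; snapshot completion is
  // reported through the separate isSnapshotDone RPC, so clients are expected to poll rather
  // than sleep for the full timeout (assumed usage, not enforced here).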

  @Override
  public StopMasterResponse stopMaster(RpcController controller,
      StopMasterRequest request) throws ServiceException {
    LOG.info(master.getClientIdAuditPrefix() + " stop");
    try {
      master.stopMaster();
    } catch (IOException e) {
      LOG.error("Exception occurred while stopping master", e);
      throw new ServiceException(e);
    }
    return StopMasterResponse.newBuilder().build();
  }

  @Override
  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
      final RpcController controller,
      final IsInMaintenanceModeRequest request) throws ServiceException {
    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
    response.setInMaintenanceMode(master.isInMaintenanceMode());
    return response.build();
  }

  @Override
  public UnassignRegionResponse unassignRegion(RpcController controller,
      UnassignRegionRequest req) throws ServiceException {
    try {
      final byte[] regionName = req.getRegion().getValue().toByteArray();
      RegionSpecifierType type = req.getRegion().getType();
      final boolean force = req.getForce();
      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();

      master.checkInitialized();
      if (type != RegionSpecifierType.REGION_NAME) {
        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
            + " actual: " + type);
      }
      Pair<HRegionInfo, ServerName> pair =
          MetaTableAccessor.getRegion(master.getConnection(), regionName);
      if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
        pair = new Pair<>(HRegionInfo.FIRST_META_REGIONINFO,
            master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
      }
      if (pair == null) {
        throw new UnknownRegionException(Bytes.toString(regionName));
      }

      HRegionInfo hri = pair.getFirst();
      if (master.cpHost != null) {
        if (master.cpHost.preUnassign(hri, force)) {
          return urr;
        }
      }
      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
          + " in current location if it is online and reassign.force=" + force);
      master.getAssignmentManager().unassign(hri);
      if (master.cpHost != null) {
        master.cpHost.postUnassign(hri, force);
      }

      return urr;
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
      ReportRegionStateTransitionRequest req) throws ServiceException {
    try {
      master.checkServiceStarted();
      return master.getAssignmentManager().reportRegionStateTransition(req);
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req)
      throws ServiceException {
    try {
      master.checkInitialized();
      return master.getMasterQuotaManager().setQuota(req);
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
      MajorCompactionTimestampRequest request) throws ServiceException {
    MajorCompactionTimestampResponse.Builder response =
        MajorCompactionTimestampResponse.newBuilder();
    try {
      master.checkInitialized();
      response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
          .toTableName(request.getTableName())));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  @Override
  public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
      RpcController controller, MajorCompactionTimestampForRegionRequest request)
      throws ServiceException {
    MajorCompactionTimestampResponse.Builder response =
        MajorCompactionTimestampResponse.newBuilder();
    try {
      master.checkInitialized();
      response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
          .getRegion().getValue().toByteArray()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  /**
   * Compact a region on the master.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public CompactRegionResponse compactRegion(final RpcController controller,
      final CompactRegionRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      byte[] regionName = request.getRegion().getValue().toByteArray();
      TableName tableName = HRegionInfo.getTable(regionName);
      // if the region is a mob region, do the mob file compaction.
      if (MobUtils.isMobRegionName(tableName, regionName)) {
        return compactMob(request, tableName);
      } else {
        return super.compactRegion(controller, request);
      }
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
      final GetRegionInfoRequest request) throws ServiceException {
    byte[] regionName = request.getRegion().getValue().toByteArray();
    TableName tableName = HRegionInfo.getTable(regionName);
    if (MobUtils.isMobRegionName(tableName, regionName)) {
      // a dummy region info contains the compaction state.
      HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
      GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
      builder.setRegionInfo(HRegionInfo.convert(mobRegionInfo));
      if (request.hasCompactionState() && request.getCompactionState()) {
        builder.setCompactionState(master.getMobCompactionState(tableName));
      }
      return builder.build();
    } else {
      return super.getRegionInfo(controller, request);
    }
  }

  /**
   * Compacts the mob files in the current table.
   * @param request the request.
   * @param tableName the current table name.
   * @return The response of the mob file compaction.
   * @throws IOException
   */
  private CompactRegionResponse compactMob(final CompactRegionRequest request,
      TableName tableName) throws IOException {
    if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
      throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
    }
    boolean allFiles = false;
    List<ColumnFamilyDescriptor> compactedColumns = new ArrayList<>();
    ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
    byte[] family = null;
    if (request.hasFamily()) {
      family = request.getFamily().toByteArray();
      for (ColumnFamilyDescriptor hcd : hcds) {
        if (Bytes.equals(family, hcd.getName())) {
          if (!hcd.isMobEnabled()) {
            LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
            throw new DoNotRetryIOException("Column family " + hcd.getNameAsString()
                + " is not a mob column family");
          }
          compactedColumns.add(hcd);
        }
      }
    } else {
      for (ColumnFamilyDescriptor hcd : hcds) {
        if (hcd.isMobEnabled()) {
          compactedColumns.add(hcd);
        }
      }
    }
    if (compactedColumns.isEmpty()) {
      LOG.error("No mob column families are assigned in the mob compaction");
      throw new DoNotRetryIOException(
          "No mob column families are assigned in the mob compaction");
    }
    if (request.hasMajor() && request.getMajor()) {
      allFiles = true;
    }
    String familyLogMsg = (family != null) ? Bytes.toString(family) : "";
    if (LOG.isTraceEnabled()) {
      LOG.trace("User-triggered mob compaction requested for table: "
          + tableName.getNameAsString() + " for column family: " + familyLogMsg);
    }
    master.requestMobCompaction(tableName, compactedColumns, allFiles);
    return CompactRegionResponse.newBuilder().build();
  }
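
  // Note: allFiles is true only when the request asks for a major compaction; a major mob
  // compaction is expected to rewrite all mob files for the selected column families rather
  // than only the small ones (an assumption about requestMobCompaction's semantics, not
  // enforced here).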

  @Override
  public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
      IsBalancerEnabledRequest request) throws ServiceException {
    IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
    response.setEnabled(master.isBalancerOn());
    return response.build();
  }

  @Override
  public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
      SetSplitOrMergeEnabledRequest request) throws ServiceException {
    SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
    try {
      master.checkInitialized();
      boolean newValue = request.getEnabled();
      for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
        MasterSwitchType switchType = convert(masterSwitchType);
        boolean oldValue = master.isSplitOrMergeEnabled(switchType);
        response.addPrevValue(oldValue);
        boolean bypass = false;
        if (master.cpHost != null) {
          bypass = master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
        }
        if (!bypass) {
          master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
        }
        if (master.cpHost != null) {
          master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
        }
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    } catch (KeeperException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  @Override
  public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
      IsSplitOrMergeEnabledRequest request) throws ServiceException {
    IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
    response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType())));
    return response.build();
  }

  @Override
  public NormalizeResponse normalize(RpcController controller,
      NormalizeRequest request) throws ServiceException {
    try {
      return NormalizeResponse.newBuilder().setNormalizerRan(master.normalizeRegions()).build();
    } catch (IOException ex) {
      throw new ServiceException(ex);
    }
  }

  @Override
  public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
      SetNormalizerRunningRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      boolean prevValue = normalizerSwitch(request.getOn());
      return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build();
    } catch (IOException ioe) {
      throw new ServiceException(ioe);
    }
  }

  @Override
  public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
      IsNormalizerEnabledRequest request) throws ServiceException {
    IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder();
    response.setEnabled(master.isNormalizerOn());
    return response.build();
  }

  /**
   * Returns the security capabilities in effect on the cluster.
   */
  @Override
  public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
      SecurityCapabilitiesRequest request) throws ServiceException {
    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
    try {
      master.checkInitialized();
      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
      if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
      } else {
        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
      }
      // The AccessController can provide AUTHORIZATION and CELL_AUTHORIZATION
      if (master.cpHost != null &&
          master.cpHost.findCoprocessor(AccessController.class.getName()) != null) {
        if (AccessController.isAuthorizationSupported(master.getConfiguration())) {
          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
        }
        if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
        }
      }
      // The VisibilityController can provide CELL_VISIBILITY
      if (master.cpHost != null &&
          master.cpHost.findCoprocessor(VisibilityController.class.getName()) != null) {
        if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
        }
      }
      response.addAllCapabilities(capabilities);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }
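
  // Capabilities are derived solely from the master's configuration and loaded coprocessors;
  // a response without AUTHORIZATION means the AccessController is not installed or
  // authorization is disabled, not that the call failed.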

  private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
    switch (switchType) {
      case SPLIT:
        return MasterSwitchType.SPLIT;
      case MERGE:
        return MasterSwitchType.MERGE;
      default:
        break;
    }
    return null;
  }
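
  // Only SPLIT and MERGE are mapped; any other protobuf switch type falls through the switch
  // and the method returns null.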

  @Override
  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
      AddReplicationPeerRequest request) throws ServiceException {
    try {
      master.addReplicationPeer(request.getPeerId(),
          ReplicationSerDeHelper.convert(request.getPeerConfig()));
      return AddReplicationPeerResponse.newBuilder().build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
      RemoveReplicationPeerRequest request) throws ServiceException {
    try {
      master.removeReplicationPeer(request.getPeerId());
      return RemoveReplicationPeerResponse.newBuilder().build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
      EnableReplicationPeerRequest request) throws ServiceException {
    try {
      master.enableReplicationPeer(request.getPeerId());
      return EnableReplicationPeerResponse.newBuilder().build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
      DisableReplicationPeerRequest request) throws ServiceException {
    try {
      master.disableReplicationPeer(request.getPeerId());
      return DisableReplicationPeerResponse.newBuilder().build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
      GetReplicationPeerConfigRequest request) throws ServiceException {
    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse
        .newBuilder();
    try {
      String peerId = request.getPeerId();
      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
      response.setPeerId(peerId);
      response.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }

  @Override
  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
      UpdateReplicationPeerConfigRequest request) throws ServiceException {
    try {
      master.updateReplicationPeerConfig(request.getPeerId(),
          ReplicationSerDeHelper.convert(request.getPeerConfig()));
      return UpdateReplicationPeerConfigResponse.newBuilder().build();
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
      ListReplicationPeersRequest request) throws ServiceException {
    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
    try {
      List<ReplicationPeerDescription> peers = master
          .listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
      for (ReplicationPeerDescription peer : peers) {
        response.addPeerDesc(ReplicationSerDeHelper.toProtoReplicationPeerDescription(peer));
      }
    } catch (ReplicationException | IOException e) {
      throw new ServiceException(e);
    }
    return response.build();
  }
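
  // When the request carries no regex the filter is passed as null, which is taken to mean
  // "list every peer" rather than matching an empty pattern.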

  @Override
  public ListDrainingRegionServersResponse listDrainingRegionServers(RpcController controller,
      ListDrainingRegionServersRequest request) throws ServiceException {
    ListDrainingRegionServersResponse.Builder response =
        ListDrainingRegionServersResponse.newBuilder();
    try {
      master.checkInitialized();
      List<ServerName> servers = master.listDrainingRegionServers();
      for (ServerName server : servers) {
        response.addServerName(ProtobufUtil.toServerName(server));
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }
    return response.build();
  }

  @Override
  public DrainRegionServersResponse drainRegionServers(RpcController controller,
      DrainRegionServersRequest request) throws ServiceException {
    DrainRegionServersResponse.Builder response = DrainRegionServersResponse.newBuilder();
    try {
      master.checkInitialized();
      for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
        master.drainRegionServer(ProtobufUtil.toServerName(pbServer));
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }
    return response.build();
  }

  @Override
  public RemoveDrainFromRegionServersResponse removeDrainFromRegionServers(RpcController controller,
      RemoveDrainFromRegionServersRequest request) throws ServiceException {
    RemoveDrainFromRegionServersResponse.Builder response =
        RemoveDrainFromRegionServersResponse.newBuilder();
    try {
      master.checkInitialized();
      for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
        master.removeDrainFromRegionServer(ProtobufUtil.toServerName(pbServer));
      }
    } catch (IOException io) {
      throw new ServiceException(io);
    }
    return response.build();
  }

  @Override
  public LockResponse requestLock(RpcController controller, final LockRequest request)
      throws ServiceException {
    try {
      if (request.getDescription().isEmpty()) {
        throw new IllegalArgumentException("Empty description");
      }
      NonceProcedureRunnable npr;
      LockType type = LockType.valueOf(request.getLockType().name());
      if (request.getRegionInfoCount() > 0) {
        final HRegionInfo[] regionInfos = new HRegionInfo[request.getRegionInfoCount()];
        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
          regionInfos[i] = HRegionInfo.convert(request.getRegionInfo(i));
        }
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
                request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else if (request.hasTableName()) {
        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
                request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else if (request.hasNamespace()) {
        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
                request.getNamespace(), type, request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        };
      } else {
        throw new IllegalArgumentException("one of table/namespace/region should be specified");
      }
      long procId = MasterProcedureUtil.submitProcedure(npr);
      return LockResponse.newBuilder().setProcId(procId).build();
    } catch (IllegalArgumentException e) {
      LOG.warn("Exception when queuing lock", e);
      throw new ServiceException(new DoNotRetryIOException(e));
    } catch (IOException e) {
      LOG.warn("Exception when queuing lock", e);
      throw new ServiceException(e);
    }
  }
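
  // Assumed client flow (a sketch, not part of this class): requestLock only queues a lock
  // procedure and returns its proc id; the caller actually holds the lock while it keeps
  // heartbeating that proc id via lockHeartbeat below, within the advertised timeout.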

  /**
   * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
   * @throws ServiceException if given proc id is found but it is not a LockProcedure.
   */
  @Override
  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
      throws ServiceException {
    try {
      if (master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
          request.getKeepAlive())) {
        return LockHeartbeatResponse.newBuilder().setTimeoutMs(
            master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
                LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
            .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
      } else {
        return LockHeartbeatResponse.newBuilder()
            .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
      }
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }
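
  // A minimal heartbeat loop sketch (assumed usage): call lockHeartbeat with keepAlive=true at
  // an interval comfortably below the returned timeoutMs; a LockStatus of UNLOCKED means the
  // procedure no longer holds the lock and the caller must stop assuming mutual exclusion.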

  @Override
  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
      RegionSpaceUseReportRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
        return RegionSpaceUseReportResponse.newBuilder().build();
      }
      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
      final long now = EnvironmentEdgeManager.currentTime();
      for (RegionSpaceUse report : request.getSpaceUseList()) {
        quotaManager.addRegionSize(HRegionInfo.convert(
            report.getRegionInfo()), report.getRegionSize(), now);
      }
      return RegionSpaceUseReportResponse.newBuilder().build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }
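
  // When quotas are disabled the report is acknowledged with an empty response and no region
  // sizes are recorded.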

  @Override
  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
      RpcController controller, GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
      GetSpaceQuotaRegionSizesResponse.Builder builder =
          GetSpaceQuotaRegionSizesResponse.newBuilder();
      if (quotaManager != null) {
        Map<HRegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
        Map<TableName, Long> regionSizesByTable = new HashMap<>();
        // Translate hregioninfo+long -> tablename+long
        for (Entry<HRegionInfo, Long> entry : regionSizes.entrySet()) {
          final TableName tableName = entry.getKey().getTable();
          Long prevSize = regionSizesByTable.get(tableName);
          if (prevSize == null) {
            prevSize = 0L;
          }
          regionSizesByTable.put(tableName, prevSize + entry.getValue());
        }
        // Serialize them into the protobuf
        for (Entry<TableName, Long> tableSize : regionSizesByTable.entrySet()) {
          builder.addSizes(RegionSizes.newBuilder()
              .setTableName(ProtobufUtil.toProtoTableName(tableSize.getKey()))
              .setSize(tableSize.getValue()).build());
        }
        return builder.build();
      }
      return builder.build();
    } catch (Exception e) {
      throw new ServiceException(e);
    }
  }
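
  // Sizes are reported per table, not per region: the quota manager's per-region snapshot is
  // summed by table name before being serialized into the response.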

  @Override
  public GetQuotaStatesResponse getQuotaStates(
      RpcController controller, GetQuotaStatesRequest request) throws ServiceException {
    try {
      master.checkInitialized();
      QuotaObserverChore quotaChore = this.master.getQuotaObserverChore();
      GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
      if (quotaChore != null) {
        // The "current" view of all tables with quotas
        Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
        for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
          builder.addTableSnapshots(
              TableQuotaSnapshot.newBuilder()
                  .setTableName(ProtobufUtil.toProtoTableName(entry.getKey()))
                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
        }
        // The "current" view of all namespaces with quotas
        Map<String, SpaceQuotaSnapshot> nsSnapshots = quotaChore.getNamespaceQuotaSnapshots();
        for (Entry<String, SpaceQuotaSnapshot> entry : nsSnapshots.entrySet()) {
          builder.addNsSnapshots(
              NamespaceQuotaSnapshot.newBuilder()
                  .setNamespace(entry.getKey())
                  .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(entry.getValue())).build());
        }
        return builder.build();
      }
      return builder.build();
    } catch (Exception e) {
      throw new ServiceException(e);