Skip to content

Commit 02c82a2

Browse files
committed
HBASE-22776 Rename config names in user scan snapshot feature
1 parent 0867714 commit 02c82a2

File tree

5 files changed

+568
-275
lines changed

5 files changed

+568
-275
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@
7171
import org.apache.hadoop.hbase.security.AccessDeniedException;
7272
import org.apache.hadoop.hbase.security.User;
7373
import org.apache.hadoop.hbase.security.access.AccessChecker;
74+
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclCleaner;
75+
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;
7476
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
7577
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
7678
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -1123,6 +1125,10 @@ private void checkSnapshotSupport(final Configuration conf, final MasterFileSyst
11231125
// Inject snapshot cleaners, if snapshot.enable is true
11241126
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
11251127
hfileCleaners.add(HFileLinkCleaner.class.getName());
1128+
// If sync acl to HDFS feature is enabled, then inject the cleaner
1129+
if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) {
1130+
hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName());
1131+
}
11261132

11271133
// Set cleaners conf
11281134
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@
2727
import org.apache.hadoop.hbase.HConstants;
2828
import org.apache.hadoop.hbase.MetaTableAccessor;
2929
import org.apache.hadoop.hbase.TableName;
30-
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
3130
import org.apache.hadoop.hbase.master.HMaster;
3231
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
3332
import org.apache.yetus.audience.InterfaceAudience;
@@ -59,7 +58,7 @@ public void init(Map<String, Object> params) {
5958
@Override
6059
public void setConf(Configuration conf) {
6160
super.setConf(conf);
62-
userScanSnapshotEnabled = isUserScanSnapshotEnabled(conf);
61+
userScanSnapshotEnabled = SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf);
6362
}
6463

6564
@Override
@@ -82,13 +81,6 @@ public boolean isEmptyDirDeletable(Path dir) {
8281
return true;
8382
}
8483

85-
private boolean isUserScanSnapshotEnabled(Configuration conf) {
86-
String masterCoprocessors = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
87-
return conf.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)
88-
&& masterCoprocessors.contains(SnapshotScannerHDFSAclController.class.getName())
89-
&& masterCoprocessors.contains(AccessController.class.getName());
90-
}
91-
9284
private boolean isEmptyArchiveDirDeletable(Path dir) {
9385
try {
9486
if (isArchiveDataDir(dir)) {

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java

Lines changed: 32 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ public Optional<MasterObserver> getMasterObserver() {
119119
public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> c)
120120
throws IOException {
121121
if (c.getEnvironment().getConfiguration()
122-
.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)) {
122+
.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) {
123123
MasterCoprocessorEnvironment mEnv = c.getEnvironment();
124124
if (!(mEnv instanceof HasMasterServices)) {
125125
throw new IOException("Does not implement HMasterServices");
@@ -133,7 +133,7 @@ public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment
133133
userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration());
134134
} else {
135135
LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure "
136-
+ "because the config " + SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE
136+
+ "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE
137137
+ " is false.");
138138
}
139139
}
@@ -213,6 +213,7 @@ public void postCompletedSnapshotAction(ObserverContext<MasterCoprocessorEnviron
213213
public void postCompletedTruncateTableAction(ObserverContext<MasterCoprocessorEnvironment> c,
214214
TableName tableName) throws IOException {
215215
if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) {
216+
hdfsAclHelper.createTableDirectories(tableName);
216217
// Since the table directories are recreated, add the HDFS acls again
217218
Set<String> users = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false);
218219
hdfsAclHelper.addTableAcl(tableName, users, "truncate");
@@ -233,9 +234,11 @@ public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
233234
try (Table aclTable =
234235
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
235236
Set<String> users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName);
236-
// 1. Delete table owner permission is synced to HDFS in acl table
237+
// 1. Remove table archive directory default ACLs
238+
hdfsAclHelper.removeTableDefaultAcl(tableName, users);
239+
// 2. Delete table owner permission is synced to HDFS in acl table
237240
SnapshotScannerHDFSAclStorage.deleteTableHdfsAcl(aclTable, tableName);
238-
// 2. Remove namespace access acls
241+
// 3. Remove namespace access acls
239242
Set<String> removeUsers = filterUsersToRemoveNsAccessAcl(aclTable, tableName, users);
240243
if (removeUsers.size() > 0) {
241244
hdfsAclHelper.removeNamespaceAccessAcl(tableName, removeUsers, "delete");
@@ -251,7 +254,7 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
251254
try (Table aclTable =
252255
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
253256
if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName)
254-
&& !hdfsAclHelper.isTableUserScanSnapshotEnabled(oldDescriptor)) {
257+
&& !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) {
255258
// 1. Create table directories used for acl inherited
256259
hdfsAclHelper.createTableDirectories(tableName);
257260
// 2. Add table users HDFS acls
@@ -264,7 +267,7 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
264267
SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(),
265268
tableUsers, tableName);
266269
} else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName)
267-
&& !hdfsAclHelper.isTableUserScanSnapshotEnabled(currentDescriptor)) {
270+
&& !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) {
268271
// 1. Remove empty table directories
269272
List<Path> tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false);
270273
for (Path path : tableRootPaths) {
@@ -290,17 +293,24 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
290293
public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
291294
String namespace) throws IOException {
292295
if (checkInitialized("deleteNamespace " + namespace)) {
293-
// 1. Record namespace user acl is not synced to HDFS
294-
SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
295-
namespace);
296-
// 2. Delete tmp namespace directory
297-
/**
298-
* Delete namespace tmp directory because it's created by this coprocessor when namespace is
299-
* created to make namespace default acl can be inherited by tables. The namespace data
300-
* directory is deleted by DeleteNamespaceProcedure, the namespace archive directory is
301-
* deleted by HFileCleaner.
302-
*/
303-
hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
296+
try (Table aclTable =
297+
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
298+
// 1. Delete namespace archive dir default ACLs
299+
Set<String> users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable,
300+
PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace)));
301+
hdfsAclHelper.removeNamespaceDefaultAcl(namespace, users);
302+
// 2. Record namespace user acl is not synced to HDFS
303+
SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
304+
namespace);
305+
// 3. Delete tmp namespace directory
306+
/**
307+
* Delete namespace tmp directory because it's created by this coprocessor when namespace is
308+
* created so that the namespace default acl can be inherited by tables. The namespace data
309+
* directory is deleted by DeleteNamespaceProcedure, the namespace archive directory is
310+
* deleted by HFileCleaner.
311+
*/
312+
hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
313+
}
304314
}
305315
}
306316

@@ -364,7 +374,9 @@ public void postGrant(ObserverContext<MasterCoprocessorEnvironment> c,
364374
UserPermission tPerm = getUserTablePermission(conf, userName, tableName);
365375
if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) {
366376
if (!isHdfsAclSet(aclTable, userName, tableName)) {
367-
// 1. Add HDFS acl
377+
// 1. create table dirs
378+
hdfsAclHelper.createTableDirectories(tableName);
379+
// 2. Add HDFS acl
368380
hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0));
369381
}
370382
// 2. Record table acl is synced to HDFS
@@ -547,13 +559,13 @@ private boolean needHandleTableHdfsAcl(TablePermission tablePermission) throws I
547559

548560
private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException {
549561
return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper
550-
.isTableUserScanSnapshotEnabled(masterServices.getTableDescriptors().get(tableName));
562+
.isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName));
551563
}
552564

553565
private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) {
554566
TableName tableName = tableDescriptor.getTableName();
555567
return !tableName.isSystemTable() && checkInitialized(operation)
556-
&& hdfsAclHelper.isTableUserScanSnapshotEnabled(tableDescriptor);
568+
&& hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor);
557569
}
558570

559571
private User getActiveUser(ObserverContext<?> ctx) throws IOException {

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java

Lines changed: 67 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
import java.io.FileNotFoundException;
2929
import java.io.IOException;
3030
import java.util.ArrayList;
31+
import java.util.Collections;
3132
import java.util.HashSet;
3233
import java.util.List;
3334
import java.util.Map;
@@ -53,6 +54,7 @@
5354
import org.apache.hadoop.hbase.client.Connection;
5455
import org.apache.hadoop.hbase.client.SnapshotDescription;
5556
import org.apache.hadoop.hbase.client.TableDescriptor;
57+
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
5658
import org.apache.hadoop.hbase.mob.MobUtils;
5759
import org.apache.hadoop.hbase.util.Bytes;
5860
import org.apache.yetus.audience.InterfaceAudience;
@@ -71,23 +73,23 @@
7173
public class SnapshotScannerHDFSAclHelper implements Closeable {
7274
private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);
7375

74-
public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable";
75-
public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER =
76-
"hbase.user.scan.snapshot.thread.number";
76+
public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
77+
public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
78+
"hbase.acl.sync.to.hdfs.thread.number";
7779
// The tmp directory to restore snapshot, it can not be a sub directory of HBase root dir
7880
public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
7981
public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
8082
"/hbase/.tmpdir-to-restore-snapshot";
8183
// The default permission of the common directories if the feature is enabled.
8284
public static final String COMMON_DIRECTORY_PERMISSION =
83-
"hbase.user.scan.snapshot.common.directory.permission";
85+
"hbase.acl.sync.to.hdfs.common.directory.permission";
8486
// The secure HBase permission is 700, 751 means all others have execute access and the mask is
8587
// set to read-execute to make the extended access ACL entries can work. Be cautious to set
8688
// this value.
8789
public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
8890
// The default permission of the snapshot restore directories if the feature is enabled.
8991
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
90-
"hbase.user.scan.snapshot.restore.directory.permission";
92+
"hbase.acl.sync.to.hdfs.restore.directory.permission";
9193
// 753 means all others have write-execute access.
9294
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";
9395

@@ -102,7 +104,7 @@ public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection conn
102104
this.conf = configuration;
103105
this.pathHelper = new PathHelper(conf);
104106
this.fs = pathHelper.getFileSystem();
105-
this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10),
107+
this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
106108
new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
107109
this.admin = connection.getAdmin();
108110
}
@@ -230,6 +232,50 @@ public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeU
230232
}
231233
}
232234

235+
/**
236+
* Remove default acl from namespace archive dir when deleting a namespace
237+
* @param namespace the namespace
238+
* @param removeUsers the users whose default acl will be removed
239+
* @return false if an error occurred, otherwise true
240+
*/
241+
public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
242+
try {
243+
long start = System.currentTimeMillis();
244+
Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
245+
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
246+
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
247+
operation.handleAcl();
248+
LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
249+
System.currentTimeMillis() - start);
250+
return true;
251+
} catch (Exception e) {
252+
LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
253+
return false;
254+
}
255+
}
256+
257+
/**
258+
* Remove default acl from table archive dir when delete table
259+
* @param tableName the table name
260+
* @param removeUsers the users whose default acl will be removed
261+
* @return false if an error occurred, otherwise true
262+
*/
263+
public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
264+
try {
265+
long start = System.currentTimeMillis();
266+
Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
267+
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
268+
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
269+
operation.handleAcl();
270+
LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
271+
System.currentTimeMillis() - start);
272+
return true;
273+
} catch (Exception e) {
274+
LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
275+
return false;
276+
}
277+
}
278+
233279
/**
234280
* Add table user acls
235281
* @param tableName the table
@@ -349,7 +395,7 @@ private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
349395
Set<TableName> tables = new HashSet<>();
350396
for (String namespace : namespaces) {
351397
tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
352-
.filter(this::isTableUserScanSnapshotEnabled).map(TableDescriptor::getTableName)
398+
.filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
353399
.collect(Collectors.toSet()));
354400
}
355401
handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
@@ -403,7 +449,7 @@ void createTableDirectories(TableName tableName) throws IOException {
403449
* return paths that a user with global permission will visit
404450
* @return the path list
405451
*/
406-
private List<Path> getGlobalRootPaths() {
452+
List<Path> getGlobalRootPaths() {
407453
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
408454
pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
409455
}
@@ -511,9 +557,20 @@ boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
511557
return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
512558
}
513559

514-
boolean isTableUserScanSnapshotEnabled(TableDescriptor tableDescriptor) {
560+
public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
561+
String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
562+
Set<String> masterCoprocessorSet = new HashSet<>();
563+
if (masterCoprocessors != null) {
564+
Collections.addAll(masterCoprocessorSet, masterCoprocessors);
565+
}
566+
return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
567+
&& masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
568+
&& masterCoprocessorSet.contains(AccessController.class.getName());
569+
}
570+
571+
boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
515572
return tableDescriptor == null ? false
516-
: Boolean.valueOf(tableDescriptor.getValue(USER_SCAN_SNAPSHOT_ENABLE));
573+
: Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
517574
}
518575

519576
PathHelper getPathHelper() {

0 commit comments

Comments
 (0)