
Commit ef1db63

2 parents 5f48139 + 11cbda4


92 files changed: +2306 −1896 lines


hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

Lines changed: 11 additions & 25 deletions
@@ -379,10 +379,6 @@ private static class DeprecatedKeyInfo {
       this.customMessage = customMessage;
     }
 
-    private final String getWarningMessage(String key) {
-      return getWarningMessage(key, null);
-    }
-
     /**
      * Method to provide the warning message. It gives the custom message if
      * non-null, and default message otherwise.
@@ -412,12 +408,9 @@ private String getWarningMessage(String key, String source) {
       return warningMessage;
     }
 
-    boolean getAndSetAccessed() {
-      return accessed.getAndSet(true);
-    }
-
-    public void clearAccessed() {
-      accessed.set(false);
+    void logDeprecation(String name, String source) {
+      LOG_DEPRECATION.info(getWarningMessage(name, source));
+      this.accessed.set(true);
     }
   }
 
@@ -728,12 +721,10 @@ private String[] handleDeprecation(DeprecationContext deprecations,
     }
     // Initialize the return value with requested name
     String[] names = new String[]{name};
-    // Deprecated keys are logged once and an updated names are returned
+    // Deprecated keys are logged and updated names are returned
    DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name);
    if (keyInfo != null) {
-      if (!keyInfo.getAndSetAccessed()) {
-        logDeprecation(keyInfo.getWarningMessage(name));
-      }
+      keyInfo.logDeprecation(name, null);
      // Override return value for deprecated keys
      names = keyInfo.newKeys;
    }
@@ -1462,13 +1453,6 @@ void logDeprecation(String message) {
     LOG_DEPRECATION.info(message);
   }
 
-  void logDeprecationOnce(String name, String source) {
-    DeprecatedKeyInfo keyInfo = getDeprecatedKeyInfo(name);
-    if (keyInfo != null && !keyInfo.getAndSetAccessed()) {
-      LOG_DEPRECATION.info(keyInfo.getWarningMessage(name, source));
-    }
-  }
-
   /**
    * Unset a previously set property.
    * @param name the property name
@@ -2448,7 +2432,10 @@ private CredentialEntry getCredentialEntry(CredentialProvider provider,
     if (oldName != null) {
       entry = provider.getCredentialEntry(oldName);
       if (entry != null) {
-        logDeprecationOnce(oldName, provider.toString());
+        DeprecatedKeyInfo ki = getDeprecatedKeyInfo(oldName);
+        if (ki != null) {
+          ki.logDeprecation(oldName, provider.toString());
+        }
         return entry;
       }
     }
@@ -2459,7 +2446,7 @@ private CredentialEntry getCredentialEntry(CredentialProvider provider,
     for (String newName : keyInfo.newKeys) {
       entry = provider.getCredentialEntry(newName);
       if (entry != null) {
-        logDeprecationOnce(name, null);
+        keyInfo.logDeprecation(name, null);
         return entry;
       }
     }
@@ -3433,8 +3420,7 @@ void handleEndProperty() {
         deprecations.getDeprecatedKeyMap().get(confName);
 
     if (keyInfo != null) {
-      logDeprecation(keyInfo.getWarningMessage(confName, wrapper.toString()));
-      keyInfo.clearAccessed();
+      keyInfo.logDeprecation(confName, wrapper.toString());
       for (String key : keyInfo.newKeys) {
        // update new keys with deprecated key's value
        results.add(new ParsedItem(
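
Taken together, these hunks replace the log-once guard (getAndSetAccessed) with an unconditional logDeprecation method, so a deprecated key now warns on every access rather than only the first. A minimal sketch of the observable behavior, assuming hypothetical key names not from this commit:

import org.apache.hadoop.conf.Configuration;

public class DeprecationLoggingDemo {
  public static void main(String[] args) {
    // Hypothetical deprecated/replacement key pair, for illustration only.
    Configuration.addDeprecation("demo.old.key", "demo.new.key");

    Configuration conf = new Configuration(false);
    conf.set("demo.old.key", "v1"); // logs "demo.old.key is deprecated..."
    conf.get("demo.old.key");       // logs again: the one-shot guard is gone
    conf.get("demo.old.key");       // ...and again, once per use
    conf.get("demo.new.key");       // reads the forwarded value, no warning
  }
}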

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

Lines changed: 30 additions & 0 deletions
@@ -384,6 +384,36 @@ public void testDeprecatedPropertyInXMLFileGeneratesLogMessage(@TempDir java.nio
     assertTrue(hasDeprecationMessage);
   }
 
+  @Test
+  public void testDeprecatedPropertyLogsWarningOnEveryUse() {
+    String oldProp = "test.deprecation.old.conf.b";
+    String newProp = "test.deprecation.new.conf.b";
+    Configuration.addDeprecation(oldProp, newProp);
+
+    TestAppender appender = new TestAppender();
+    Logger deprecationLogger =
+        Logger.getLogger("org.apache.hadoop.conf.Configuration.deprecation");
+    deprecationLogger.addAppender(appender);
+
+    try {
+      conf.set(oldProp, "b1");
+      conf.get(oldProp);
+      conf.set(oldProp, "b2");
+      conf.get(oldProp);
+      // Using the new property should not log a warning
+      conf.set(newProp, "b3");
+      conf.get(newProp);
+      conf.set(newProp, "b4");
+      conf.get(newProp);
+    } finally {
+      deprecationLogger.removeAppender(appender);
+    }
+
+    Pattern deprecationMsgPattern = Pattern.compile(oldProp + " is deprecated");
+    long count = appender.log.stream().map(LoggingEvent::getRenderedMessage)
+        .filter(msg -> deprecationMsgPattern.matcher(msg).find()).count();
+    assertEquals(4, count,
+        "Expected exactly four warnings for deprecated property usage");
+  }
+
   /**
    * A simple appender for white box testing.
    */

hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml

Lines changed: 25 additions & 0 deletions
@@ -77,6 +77,31 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.platform</groupId>
+      <artifactId>junit-platform-launcher</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.vintage</groupId>
+      <artifactId>junit-vintage-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>

hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c

Lines changed: 2 additions & 2 deletions
@@ -263,8 +263,8 @@ jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
     // Too bad...
     return getPendingExceptionAndClear(env);
   }
-  exc = constructNewObjectOfClass(env, &out, "RuntimeException",
-                                  "(java/lang/String;)V", jstr);
+  exc = constructNewObjectOfClass(env, &out, "java/lang/RuntimeException",
+                                  "(Ljava/lang/String;)V", jstr);
   (*env)->DeleteLocalRef(env, jstr);
   // Again, we'll either get an out of memory exception or the
   // RuntimeException we wanted.
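
This fix corrects two JNI conventions at once: class names must be in the fully qualified internal form (java/lang/RuntimeException, not the bare RuntimeException), and reference types in method descriptors must be wrapped as L...; ((Ljava/lang/String;)V, not (java/lang/String;)V). A small sketch showing how both strings can be derived from standard Java APIs instead of hand-written; the class name is illustrative:

import java.lang.invoke.MethodType;

public class JniNamesDemo {
  public static void main(String[] args) {
    // JNI internal class name: dots become slashes.
    String internalName = RuntimeException.class.getName().replace('.', '/');
    System.out.println(internalName); // java/lang/RuntimeException

    // JNI descriptor for a constructor taking a String: reference types
    // are encoded as L<internal name>; and constructors return void.
    String descriptor = MethodType
        .methodType(void.class, String.class)
        .toMethodDescriptorString();
    System.out.println(descriptor);   // (Ljava/lang/String;)V
  }
}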

hadoop-tools/hadoop-aws/pom.xml

Lines changed: 0 additions & 10 deletions
@@ -499,11 +499,6 @@
       <artifactId>wildfly-openssl</artifactId>
       <scope>runtime</scope>
     </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-inline</artifactId>
@@ -618,10 +613,5 @@
       <artifactId>junit-platform-launcher</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.junit.vintage</groupId>
-      <artifactId>junit-vintage-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 </project>
</project>

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java

Lines changed: 1 addition & 2 deletions
@@ -684,9 +684,8 @@ public void initialize(URI name, Configuration originalConf)
     s3ExpressStore = isS3ExpressStore(bucket, endpoint);
 
     // should the delete also purge uploads?
-    // happens if explicitly enabled, or if the store is S3Express storage.
     dirOperationsPurgeUploads = conf.getBoolean(DIRECTORY_OPERATIONS_PURGE_UPLOADS,
-        s3ExpressStore);
+        DIRECTORY_OPERATIONS_PURGE_UPLOADS_DEFAULT);
 
     this.isMultipartUploadEnabled = conf.getBoolean(MULTIPART_UPLOADS_ENABLED,
         DEFAULT_MULTIPART_UPLOAD_ENABLED);

hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md

Lines changed: 23 additions & 4 deletions
@@ -1218,10 +1218,29 @@ java.io.FileNotFoundException: Completing multi-part upload on fork-5/test/multi
 This can happen when all outstanding uploads have been aborted, including the
 active ones.
 
-If the bucket has a lifecycle policy of deleting multipart uploads, make sure
-that the expiry time of the deletion is greater than that required for all open
-writes to complete the write,
-*and for all jobs using the S3A committers to commit their work.*
+When working with S3A committers and multipart uploads (MPUs), keep these guidelines in mind:
+
+1. **Bucket lifecycle policies:**
+   - If your bucket has a lifecycle policy that deletes multipart uploads,
+     set the deletion expiry time long enough to:
+     - complete all open write operations
+     - allow S3A committers to finish their commit process
+
+2. **Directory operations and MPUs:**
+   - Setting `fs.s3a.directory.operations.purge.uploads=true` aborts all pending MPUs before directory cleanup
+   - For jobs using S3A committers:
+     - Set `fs.s3a.directory.operations.purge.uploads=false` when directories need to be overwritten before job completion
+     - This prevents active uploads from being aborted accidentally during the commit phase
+
+
+### S3 Express store directory object not getting deleted
+
+When working with S3 Express store buckets (unlike standard S3 buckets), follow these steps to purge a directory object:
+
+1. Set `fs.s3a.directory.operations.purge.uploads=true` if you need to delete a directory object that has pending multipart uploads (MPUs).
+
+2. This setting ensures that all pending MPUs are aborted before the directory object is deleted, which is a requirement specific to S3 Express store buckets.
+
 
 ### Application hangs after reading a number of files

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java

Lines changed: 4 additions & 9 deletions
@@ -28,10 +28,8 @@
 
 import org.apache.hadoop.conf.Configuration;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 
 /**
@@ -49,14 +47,11 @@ public abstract class AbstractS3AMockTest {
         .build())
         .build();
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
   protected S3AFileSystem fs;
   protected S3Client s3;
   protected Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = createConfiguration();
     fs = new S3AFileSystem();
@@ -97,7 +92,7 @@ public S3Client getS3Client() {
     return s3;
   }
 
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     if (fs != null) {
       fs.close();
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java

Lines changed: 6 additions & 9 deletions
@@ -22,10 +22,9 @@
 import org.apache.hadoop.util.SemaphoredDelegatingExecutor;
 import org.apache.hadoop.util.StopWatch;
 
-import org.junit.AfterClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -35,11 +34,12 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Basic test for S3A's blocking executor service.
  */
+@Timeout(60)
 public class ITestBlockingThreadPoolExecutorService {
 
   private static final Logger LOG = LoggerFactory.getLogger(
@@ -56,10 +56,7 @@ public class ITestBlockingThreadPoolExecutorService {
 
   private static BlockingThreadPoolExecutorService tpe;
 
-  @Rule
-  public Timeout testTimeout = new Timeout(60, TimeUnit.SECONDS);
-
-  @AfterClass
+  @AfterAll
   public static void afterClass() throws Exception {
     ensureDestroyed();
   }
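
Note the rule-to-annotation move here: JUnit 4's @Rule Timeout field applied per test, while a class-level JUnit 5 @Timeout(60) covers every test method in the class. A minimal sketch of the annotation's semantics; the class and method names are hypothetical:

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Each @Test method must finish within 60 seconds (the unit defaults
// to seconds), replacing the JUnit 4 per-test Timeout rule.
@Timeout(60)
public class TimeoutDemoTest {
  @Test
  public void finishesInTime() throws InterruptedException {
    Thread.sleep(10); // comfortably under the limit
  }
}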

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestEMRFSCompatibility.java

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 package org.apache.hadoop.fs.s3a;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.Path;
 