@@ -913,7 +913,7 @@ class DAGScheduler(
           partitionsToCompute.map { id =>
             val locs = getPreferredLocs(stage.rdd, id)
             val part = stage.rdd.partitions(id)
-            new ShuffleMapTask(stage.id, stage.attemptId, taskBinary, part, locs)
+            new ShuffleMapTask(stage.id, stage.latestInfo.attemptId, taskBinary, part, locs)
           }
 
         case stage: ResultStage =>
@@ -922,7 +922,7 @@ class DAGScheduler(
             val p: Int = job.partitions(id)
             val part = stage.rdd.partitions(p)
             val locs = getPreferredLocs(stage.rdd, p)
-            new ResultTask(stage.id, stage.attemptId, taskBinary, part, locs, id)
+            new ResultTask(stage.id, stage.latestInfo.attemptId, taskBinary, part, locs, id)
           }
       }
     } catch {
@@ -1128,8 +1128,6 @@ class DAGScheduler(
         val failedStage = stageIdToStage(task.stageId)
         val mapStage = shuffleToMapStage(shuffleId)
 
-        // failedStage.attemptId is already on the next attempt, so we have to use
-        // failedStage.latestInfo.attemptId
         if (failedStage.latestInfo.attemptId != task.stageAttemptId) {
           logInfo(s"Ignoring fetch failure from $task as it's from $failedStage attempt " +
             s"${task.stageAttemptId}, which has already failed")