diff --git a/dataproc/README.md b/dataproc/README.md index 16beb58eec3..8fe04973304 100644 --- a/dataproc/README.md +++ b/dataproc/README.md @@ -8,7 +8,7 @@ demonstrated here could also be accomplished using the Cloud Console or the gclo `list_clusters.py` is a simple command-line program to demonstrate connecting to the Dataproc API and listing the clusters in a region -`create_cluster_and_submit_jbo.py` demonstrates how to create a cluster, submit the +`create_cluster_and_submit_job.py` demonstrates how to create a cluster, submit the `pyspark_sort.py` job, download the output from Google Cloud Storage, and output the result. ## Prerequisites to run locally: diff --git a/dataproc/create_cluster_and_submit_job.py b/dataproc/create_cluster_and_submit_job.py index 9077f926c74..0cf58ad74c5 100644 --- a/dataproc/create_cluster_and_submit_job.py +++ b/dataproc/create_cluster_and_submit_job.py @@ -166,7 +166,6 @@ def wait_for_job(dataproc, project, job_id): jobId=job_id).execute() # Handle exceptions if result['status']['state'] == 'ERROR': - print(result) raise Exception(result['status']['details']) elif result['status']['state'] == 'DONE': print('Job finished')