From 2c9bce9738209d36c573545e486cd528844fe107 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 09:36:09 -0700
Subject: [PATCH 01/10] updates migration region tags

---
 language/cloud-client/v1/quickstart.py |  8 ++++----
 language/cloud-client/v1/snippets.py   | 20 ++++++++++----------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/language/cloud-client/v1/quickstart.py b/language/cloud-client/v1/quickstart.py
index 3c19e395a42..7c075a513b6 100644
--- a/language/cloud-client/v1/quickstart.py
+++ b/language/cloud-client/v1/quickstart.py
@@ -18,16 +18,16 @@ def run_quickstart():
     # [START language_quickstart]
 
     # Imports the Google Cloud client library
-    # [START migration_import]
+    # [START language_python_migration_imports]
     from google.cloud import language
     from google.cloud.language import enums
     from google.cloud.language import types
-    # [END migration_import]
+    # [END language_python_migration_imports]
 
     # Instantiates a client
-    # [START migration_client]
+    # [START language_python_migration_client]
     client = language.LanguageServiceClient()
-    # [END migration_client]
+    # [END language_python_migration_client]
 
     # The text to analyze
     text = u'Hello, world!'
diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index 30b591a4037..6a6aa86ff7c 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -39,12 +39,12 @@ def sentiment_text(text):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    # [START migration_document_text]
-    # [START migration_analyze_sentiment]
+    # [START language_python_migration_document_text]
+    # [START language_python_migration_analyze_sentiment]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
-    # [END migration_document_text]
+    # [END language_python_migration_document_text]
 
     # Detects sentiment in the document. You can also analyze HTML with:
     #   document.type == enums.Document.Type.HTML
@@ -52,7 +52,7 @@ def sentiment_text(text):
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
-    # [END migration_analyze_sentiment]
+    # [END language_python_migration_analyze_sentiment]
 # [END def_sentiment_text]
@@ -62,11 +62,11 @@ def sentiment_file(gcs_uri):
     client = language.LanguageServiceClient()
 
     # Instantiates a plain text document.
-    # [START migration_document_gcs_uri]
+    # [START language_python_migration_document_gcs_uri]
     document = types.Document(
         gcs_content_uri=gcs_uri,
         type=enums.Document.Type.PLAIN_TEXT)
-    # [END migration_document_gcs_uri]
+    # [END language_python_migration_document_gcs_uri]
 
     # Detects sentiment in the document. You can also analyze HTML with:
     #   document.type == enums.Document.Type.HTML
@@ -86,7 +86,7 @@ def entities_text(text):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    # [START migration_analyze_entities]
+    # [START language_python_migration_analyze_entities]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
@@ -107,7 +107,7 @@ def entities_text(text):
         print(u'{:<16}: {}'.format('salience', entity.salience))
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
-    # [END migration_analyze_entities]
+    # [END language_python_migration_analyze_entities]
 # [END def_entities_text]
@@ -149,7 +149,7 @@ def syntax_text(text):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    # [START migration_analyze_syntax]
+    # [START language_python_migration_analyze_syntax]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
@@ -165,7 +165,7 @@ def syntax_text(text):
     for token in tokens:
         print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
                                token.text.content))
-    # [END migration_analyze_syntax]
+    # [END language_python_migration_analyze_syntax]
 # [END def_syntax_text]

From 32e9621ce7afd67771b0d6b7f3676f3757ea11d4 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 10:40:02 -0700
Subject: [PATCH 02/10] update entity region tags

---
 language/cloud-client/v1/snippets.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index 6a6aa86ff7c..9055adfc285 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -77,7 +77,7 @@ def sentiment_file(gcs_uri):
 # [END def_sentiment_file]
 
 
-# [START def_entities_text]
+# [START language_entities_text]
 def entities_text(text):
     """Detects entities in the text."""
     client = language.LanguageServiceClient()
@@ -108,10 +108,10 @@ def entities_text(text):
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
     # [END language_python_migration_analyze_entities]
-# [END def_entities_text]
+# [END language_entities_text]
 
 
-# [START def_entities_file]
+# [START language_entities_file_gcs]
 def entities_file(gcs_uri):
     """Detects entities in the file located in Google Cloud Storage."""
     client = language.LanguageServiceClient()
@@ -137,7 +137,7 @@ def entities_file(gcs_uri):
         print(u'{:<16}: {}'.format('salience', entity.salience))
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
-# [END def_entities_file]
+# [END language_entities_file_gcs]
@@ -193,7 +193,7 @@ def syntax_file(gcs_uri):
 
 
-# [START def_entity_sentiment_text]
+# [START language_entity_sentiment_text]
 def entity_sentiment_text(text):
     """Detects entity sentiment in the provided text."""
     client = language.LanguageServiceClient()
@@ -223,9 +223,10 @@ def entity_sentiment_text(text):
             print(u'  Type : {}'.format(mention.type))
         print(u'Salience: {}'.format(entity.salience))
         print(u'Sentiment: {}\n'.format(entity.sentiment))
-# [END def_entity_sentiment_text]
+# [END language_entity_sentiment_text]
 
 
+# [START language_entity_sentiment_file_gcs]
 def entity_sentiment_file(gcs_uri):
     """Detects entity sentiment in a Google Cloud Storage file."""
     client = language.LanguageServiceClient()
@@ -251,6 +252,7 @@ def entity_sentiment_file(gcs_uri):
             print(u'  Type : {}'.format(mention.type))
         print(u'Salience: {}'.format(entity.salience))
         print(u'Sentiment: {}\n'.format(entity.sentiment))
+# [END language_entity_sentiment_file_gcs]

From f8d343e1f37d3c2511cc736a71e513650c88d3e8 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 11:15:38 -0700
Subject: [PATCH 03/10] update sentiment tags

---
 language/cloud-client/v1/snippets.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index 9055adfc285..7e9ad893e8f 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -30,7 +30,7 @@
 import six
 
 
-# [START def_sentiment_text]
+# [START language_sentiment_text]
 def sentiment_text(text):
     """Detects sentiment in the text."""
     client = language.LanguageServiceClient()
@@ -53,10 +53,10 @@ def sentiment_text(text):
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
     # [END language_python_migration_analyze_sentiment]
-# [END def_sentiment_text]
+# [END language_sentiment_text]
 
 
-# [START def_sentiment_file]
+# [START language_sentiment_file_gcs]
 def sentiment_file(gcs_uri):
     """Detects sentiment in the file located in Google Cloud Storage."""
     client = language.LanguageServiceClient()
@@ -74,7 +74,7 @@ def sentiment_file(gcs_uri):
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
-# [END def_sentiment_file]
+# [END language_sentiment_file_gcs]
 
 
 # [START language_entities_text]

From bfbe918ae926960aa62b21ff3484cce94c542b1f Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 11:28:39 -0700
Subject: [PATCH 04/10] update syntax region tags

---
 language/cloud-client/v1/snippets.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index 7e9ad893e8f..d243d870a3f 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -40,7 +40,7 @@ def sentiment_text(text):
 
     # Instantiates a plain text document.
     # [START language_python_migration_document_text]
-    # [START language_python_migration_analyze_sentiment]
+    # [START language_python_migration_sentiment_text]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
@@ -52,7 +52,7 @@ def sentiment_text(text):
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
-    # [END language_python_migration_analyze_sentiment]
+    # [END language_python_migration_sentiment_text]
 # [END language_sentiment_text]
@@ -140,7 +140,7 @@ def entities_file(gcs_uri):
 # [END language_entities_file_gcs]
 
 
-# [START def_syntax_text]
+# [START language_syntax_text]
 def syntax_text(text):
     """Detects syntax in the text."""
     client = language.LanguageServiceClient()
@@ -149,7 +149,7 @@ def syntax_text(text):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    # [START language_python_migration_analyze_syntax]
+    # [START language_python_migration_syntax_text]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
@@ -165,11 +165,11 @@ def syntax_text(text):
     for token in tokens:
         print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
                                token.text.content))
-    # [END language_python_migration_analyze_syntax]
-# [END def_syntax_text]
+    # [END language_python_migration_syntax_text]
+# [END language_syntax_text]
 
 
-# [START def_syntax_file]
+# [START language_syntax_file_gcs]
 def syntax_file(gcs_uri):
     """Detects syntax in the file located in Google Cloud Storage."""
     client = language.LanguageServiceClient()
@@ -190,7 +190,7 @@ def syntax_file(gcs_uri):
     for token in tokens:
         print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
                                token.text.content))
-# [END def_syntax_file]
+# [END language_syntax_file_gcs]
 
 
 # [START language_entity_sentiment_text]

From d66f217fe1c8653ac40e0b2089ec1ab65c0e5430 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 11:42:30 -0700
Subject: [PATCH 05/10] update region tags for classify text tutorial

---
 .../classify_text/classify_text_tutorial.py | 32 +++++++++----------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/language/classify_text/classify_text_tutorial.py b/language/classify_text/classify_text_tutorial.py
index 1ac9e0acb7b..2ce388cff09 100644
--- a/language/classify_text/classify_text_tutorial.py
+++ b/language/classify_text/classify_text_tutorial.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# [START classify_text_tutorial]
+# [START language_classify_text_tutorial]
 """Using the classify_text method to find content categories of text files,
 Then use the content category labels to compare text similarity.
 
@@ -21,7 +21,7 @@
 https://cloud.google.com/natural-language/docs/classify-text-tutorial.
 """
 
-# [START classify_text_tutorial_import]
+# [START language_classify_text_tutorial_imports]
 import argparse
 import io
 import json
@@ -30,10 +30,10 @@
 from google.cloud import language
 import numpy
 import six
-# [END classify_text_tutorial_import]
+# [END language_classify_text_tutorial_imports]
 
 
-# [START def_classify]
+# [START language_classify_text_tutorial_classify]
 def classify(text, verbose=True):
     """Classify the input text into categories. """
@@ -61,10 +61,10 @@ def classify(text, verbose=True):
             print(u'{:<16}: {}'.format('confidence', category.confidence))
 
     return result
-# [END def_classify]
+# [END language_classify_text_tutorial_classify]
 
 
-# [START def_index]
+# [START language_classify_text_tutorial_index]
 def index(path, index_file):
     """Classify each text file in a directory and write
     the results to the index_file.
@@ -91,10 +91,10 @@ def index(path, index_file):
     print('Texts indexed in file: {}'.format(index_file))
 
     return result
-# [END def_index]
+# [END language_classify_text_tutorial_index]
 
 
-# [START def_split_labels]
+# [START language_classify_text_tutorial_split_labels]
 def split_labels(categories):
     """The category labels are of the form "/a/b/c" up to three levels,
     for example "/Computers & Electronics/Software", and these labels
@@ -121,10 +121,10 @@ def split_labels(categories):
             _categories[label] = confidence
 
     return _categories
-# [END def_split_labels]
+# [END language_classify_text_tutorial_split_labels]
 
 
-# [START def_similarity]
+# [START language_classify_text_tutorial_similarity]
 def similarity(categories1, categories2):
     """Cosine similarity of the categories treated as sparse vectors."""
     categories1 = split_labels(categories1)
@@ -143,10 +143,10 @@ def similarity(categories1, categories2):
         dot += confidence * categories2.get(label, 0.0)
 
     return dot / (norm1 * norm2)
-# [END def_similarity]
+# [END language_classify_text_tutorial_similarity]
 
 
-# [START def_query]
+# [START language_classify_text_tutorial_query]
 def query(index_file, text, n_top=3):
     """Find the indexed files that are the most similar to
     the query text.
@@ -176,10 +176,10 @@ def query(index_file, text, n_top=3):
         print('\n')
 
     return similarities
-# [END def_query]
+# [END language_classify_text_tutorial_query]
 
 
-# [START def_query_category]
+# [START language_classify_text_tutorial_query_category]
 def query_category(index_file, category_string, n_top=3):
     """Find the indexed files that are the most similar to
     the query label.
@@ -211,7 +211,7 @@ def query_category(index_file, category_string, n_top=3):
         print('\n')
 
     return similarities
-# [END def_query_category]
+# [END language_classify_text_tutorial_query_category]
 
 
 if __name__ == '__main__':
@@ -255,4 +255,4 @@ def query_category(index_file, category_string, n_top=3):
         query(args.index_file, args.text)
     if args.command == 'query-category':
         query_category(args.index_file, args.category)
-# [END classify_text_tutorial]
+# [END language_classify_text_tutorial]

From aaa32f016d416c5d2de780402930263e45c9f0b7 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 11:51:17 -0700
Subject: [PATCH 06/10] update classify region tags

---
 language/cloud-client/v1/snippets.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index d243d870a3f..c5e6ae040e4 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -255,7 +255,7 @@ def entity_sentiment_file(gcs_uri):
 # [END language_entity_sentiment_file_gcs]
 
 
-# [START def_classify_text]
+# [START language_classify_text]
 def classify_text(text):
     """Classifies content categories of the provided text."""
     client = language.LanguageServiceClient()
@@ -273,10 +273,10 @@ def classify_text(text):
         print(u'=' * 20)
        print(u'{:<16}: {}'.format('name', category.name))
         print(u'{:<16}: {}'.format('confidence', category.confidence))
-# [END def_classify_text]
+# [END language_classify_text]
 
 
-# [START def_classify_file]
+# [START language_classify_file_gcs]
 def classify_file(gcs_uri):
     """Classifies content categories of the text in a Google Cloud Storage
     file.
@@ -293,7 +293,7 @@ def classify_file(gcs_uri):
         print(u'=' * 20)
         print(u'{:<16}: {}'.format('name', category.name))
         print(u'{:<16}: {}'.format('confidence', category.confidence))
-# [END def_classify_file]
+# [END language_classify_file_gcs]
 
 
 if __name__ == '__main__':

From 6ad5f3794ae461f039b3b35ff597ebcc891d5af6 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 11:55:41 -0700
Subject: [PATCH 07/10] updates migration tags to match standard

---
 language/cloud-client/v1/snippets.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index c5e6ae040e4..ee0918c1989 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -62,11 +62,11 @@ def sentiment_file(gcs_uri):
     client = language.LanguageServiceClient()
 
     # Instantiates a plain text document.
-    # [START language_python_migration_document_gcs_uri]
+    # [START language_python_migration_document_gcs]
     document = types.Document(
         gcs_content_uri=gcs_uri,
         type=enums.Document.Type.PLAIN_TEXT)
-    # [END language_python_migration_document_gcs_uri]
+    # [END language_python_migration_document_gcs]
 
     # Detects sentiment in the document. You can also analyze HTML with:
     #   document.type == enums.Document.Type.HTML
@@ -86,7 +86,7 @@ def entities_text(text):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    # [START language_python_migration_analyze_entities]
+    # [START language_python_migration_entities_text]
     document = types.Document(
         content=text,
         type=enums.Document.Type.PLAIN_TEXT)
@@ -107,7 +107,7 @@ def entities_text(text):
         print(u'{:<16}: {}'.format('salience', entity.salience))
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
-    # [END language_python_migration_analyze_entities]
+    # [END language_python_migration_entities_text]
 # [END language_entities_text]

From 37b84c71755f2d105a5edcab005dc2e79c38db51 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Sun, 19 Aug 2018 12:12:04 -0700
Subject: [PATCH 08/10] update tags for sentiment analysis tutorial

---
 language/sentiment/sentiment_analysis.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/language/sentiment/sentiment_analysis.py b/language/sentiment/sentiment_analysis.py
index 8ac8575b08e..bab072cec8a 100644
--- a/language/sentiment/sentiment_analysis.py
+++ b/language/sentiment/sentiment_analysis.py
@@ -11,19 +11,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# [START sentiment_tutorial]
+# [START language_sentiment_tutorial]
 """Demonstrates how to make a simple call to the Natural Language API."""
 
-# [START sentiment_tutorial_import]
+# [START language_sentiment_tutorial_imports]
 import argparse
 
 from google.cloud import language
 from google.cloud.language import enums
 from google.cloud.language import types
-# [END sentiment_tutorial_import]
+# [END language_sentiment_tutorial_imports]
 
 
-# [START def_print_result]
+# [START language_sentiment_tutorial_print_result]
 def print_result(annotations):
     score = annotations.document_sentiment.score
     magnitude = annotations.document_sentiment.magnitude
@@ -36,10 +36,10 @@ def print_result(annotations):
     print('Overall Sentiment: score of {} with magnitude of {}'.format(
         score, magnitude))
     return 0
-# [END def_print_result]
+# [END language_sentiment_tutorial_print_result]
 
 
-# [START def_analyze]
+# [START language_sentiment_tutorial_analyze_sentiment]
 def analyze(movie_review_filename):
     """Run a sentiment analysis request on text within a passed filename."""
     client = language.LanguageServiceClient()
@@ -55,7 +55,7 @@ def analyze(movie_review_filename):
 
     # Print the results
     print_result(annotations)
-# [END def_analyze]
+# [END language_sentiment_tutorial_analyze_sentiment]
 
 
 if __name__ == '__main__':
@@ -68,4 +68,4 @@ def analyze(movie_review_filename):
     args = parser.parse_args()
 
     analyze(args.movie_review_filename)
-# [END sentiment_tutorial]
+# [END language_sentiment_tutorial]

From f074808195d338bc4581c0641fa798f80b91eb7e Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Mon, 20 Aug 2018 11:04:43 -0400
Subject: [PATCH 09/10] add run tag to sentiment tutorial

---
 language/sentiment/sentiment_analysis.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/language/sentiment/sentiment_analysis.py b/language/sentiment/sentiment_analysis.py
index bab072cec8a..3b572bc2c94 100644
--- a/language/sentiment/sentiment_analysis.py
+++ b/language/sentiment/sentiment_analysis.py
@@ -58,6 +58,7 @@ def analyze(movie_review_filename):
 # [END language_sentiment_tutorial_analyze_sentiment]
 
 
+# [START language_sentiment_tutorial_run_application]
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         description=__doc__,
@@ -68,4 +68,5 @@ def analyze(movie_review_filename):
     args = parser.parse_args()
 
     analyze(args.movie_review_filename)
+# [END language_sentiment_tutorial_run_application]
 # [END language_sentiment_tutorial]

From eefa4f430265575094123bb728fb4bb622b55b11 Mon Sep 17 00:00:00 2001
From: Alix Hamilton
Date: Tue, 21 Aug 2018 14:57:46 -0400
Subject: [PATCH 10/10] updates all _file_gcs to _gcs

---
 language/cloud-client/v1/snippets.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/language/cloud-client/v1/snippets.py b/language/cloud-client/v1/snippets.py
index ee0918c1989..3b1c02f9c68 100644
--- a/language/cloud-client/v1/snippets.py
+++ b/language/cloud-client/v1/snippets.py
@@ -56,7 +56,7 @@ def sentiment_text(text):
 # [END language_sentiment_text]
 
 
-# [START language_sentiment_file_gcs]
+# [START language_sentiment_gcs]
 def sentiment_file(gcs_uri):
     """Detects sentiment in the file located in Google Cloud Storage."""
     client = language.LanguageServiceClient()
@@ -74,7 +74,7 @@ def sentiment_file(gcs_uri):
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
-# [END language_sentiment_file_gcs]
+# [END language_sentiment_gcs]
 
 
 # [START language_entities_text]
-# [START language_entities_file_gcs]
+# [START language_entities_gcs]
 def entities_file(gcs_uri):
     """Detects entities in the file located in Google Cloud Storage."""
     client = language.LanguageServiceClient()
@@ -137,7 +137,7 @@ def entities_file(gcs_uri):
         print(u'{:<16}: {}'.format('salience', entity.salience))
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
-# [END language_entities_file_gcs]
+# [END language_entities_gcs]
@@ -169,7 +169,7 @@ def syntax_text(text):
 # [END language_syntax_text]
 
 
-# [START language_syntax_file_gcs]
+# [START language_syntax_gcs]
 def syntax_file(gcs_uri):
     """Detects syntax in the file located in Google Cloud Storage."""
     client = language.LanguageServiceClient()
@@ -190,7 +190,7 @@ def syntax_file(gcs_uri):
     for token in tokens:
         print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
                                token.text.content))
-# [END language_syntax_file_gcs]
+# [END language_syntax_gcs]
@@ -226,7 +226,7 @@ def entity_sentiment_text(text):
 # [END language_entity_sentiment_text]
 
 
-# [START language_entity_sentiment_file_gcs]
+# [START language_entity_sentiment_gcs]
 def entity_sentiment_file(gcs_uri):
     """Detects entity sentiment in a Google Cloud Storage file."""
     client = language.LanguageServiceClient()
@@ -252,7 +252,7 @@ def entity_sentiment_file(gcs_uri):
             print(u'  Type : {}'.format(mention.type))
         print(u'Salience: {}'.format(entity.salience))
         print(u'Sentiment: {}\n'.format(entity.sentiment))
-# [END language_entity_sentiment_file_gcs]
+# [END language_entity_sentiment_gcs]
@@ -276,7 +276,7 @@ def classify_text(text):
 # [END language_classify_text]
 
 
-# [START language_classify_file_gcs]
+# [START language_classify_gcs]
 def classify_file(gcs_uri):
     """Classifies content categories of the text in a Google Cloud Storage
     file.
@@ -293,7 +293,7 @@ def classify_file(gcs_uri):
         print(u'=' * 20)
         print(u'{:<16}: {}'.format('name', category.name))
         print(u'{:<16}: {}'.format('confidence', category.confidence))
-# [END language_classify_file_gcs]
+# [END language_classify_gcs]
 
 
 if __name__ == '__main__':
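For reference, a minimal sketch of the snippet code that the renamed region tags wrap, pieced together from the hunks above. It assumes the same google-cloud-language client release these samples already target; the input text is just the quickstart's own placeholder value.

    # Sketch only: mirrors the sentiment_text() / quickstart code shown in the diffs above.
    from google.cloud import language
    from google.cloud.language import enums
    from google.cloud.language import types

    # Instantiates a client (the block wrapped by language_python_migration_client).
    client = language.LanguageServiceClient()

    # Builds a plain-text document (wrapped by language_python_migration_document_text).
    document = types.Document(
        content=u'Hello, world!',
        type=enums.Document.Type.PLAIN_TEXT)

    # Analyzes sentiment, as in the language_sentiment_text snippet.
    sentiment = client.analyze_sentiment(document).document_sentiment
    print('Score: {}'.format(sentiment.score))
    print('Magnitude: {}'.format(sentiment.magnitude))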