diff --git a/refact-agent/gui/.refact/integrations.d/service_webserver.yaml b/refact-agent/gui/.refact/integrations.d/service_webserver.yaml
index ab40d97de..19ff984c5 100644
--- a/refact-agent/gui/.refact/integrations.d/service_webserver.yaml
+++ b/refact-agent/gui/.refact/integrations.d/service_webserver.yaml
@@ -1,20 +1,20 @@
 command: npm run dev
-command_workdir: ''
+command_workdir: ""
 description: Runs chat-js webserver, Working on URL localhost:5173
 parameters:
-- name: ''
-  type: string
-  description: ''
-timeout: ''
+  - name: "gui_dev_server"
+    type: string
+    description: ""
+timeout: ""
 output_filter:
   limit_lines: 100
   limit_chars: 10000
   valuable_top_or_bottom: top
   grep: (?i)error
   grep_context_lines: 5
-  remove_from_output: ''
-startup_wait_port: '6006'
-startup_wait: '10'
+  remove_from_output: ""
+startup_wait_port: "6006"
+startup_wait: "10"
 startup_wait_keyword: http://localhost:5173/
 available:
   on_your_laptop: true
diff --git a/refact-agent/gui/generated/documents.ts b/refact-agent/gui/generated/documents.ts
index ac31069c4..edca6a5a4 100644
--- a/refact-agent/gui/generated/documents.ts
+++ b/refact-agent/gui/generated/documents.ts
@@ -30,10 +30,8 @@ export type BasicStuffResult = {
 
 export type CloudtoolResultInput = {
   dollars?: Scalars['Float']['input'];
-  ft_id: Scalars['String']['input'];
-  ftm_alt: Scalars['Int']['input'];
+  fcall_id: Scalars['String']['input'];
   ftm_content: Scalars['String']['input'];
-  ftm_num: Scalars['Int']['input'];
   ftm_provenance: Scalars['String']['input'];
 };
 
@@ -113,6 +111,7 @@ export type FExpertChoiceConsequences = {
 
 export type FExpertInput = {
   fexp_allow_tools: Scalars['String']['input'];
+  fexp_app_capture_tools?: Scalars['String']['input'];
   fexp_block_tools: Scalars['String']['input'];
   fexp_name: Scalars['String']['input'];
   fexp_python_kernel: Scalars['String']['input'];
@@ -125,6 +124,7 @@ export type FExpertInput = {
 export type FExpertOutput = {
   __typename?: 'FExpertOutput';
   fexp_allow_tools: Scalars['String']['output'];
+  fexp_app_capture_tools?: Maybe;
   fexp_block_tools: Scalars['String']['output'];
   fexp_id: Scalars['String']['output'];
   fexp_name: Scalars['String']['output'];
@@ -184,6 +184,7 @@ export type FExternalDataSourceSubs = {
 };
 
 export type FKanbanTaskInput = {
+  details_json?: InputMaybe;
   state: Scalars['String']['input'];
   title: Scalars['String']['input'];
 };
@@ -233,18 +234,83 @@ export type FKnowledgeItemSubs = {
 
 export type FMarketplaceExpertInput = {
   fexp_allow_tools: Scalars['String']['input'];
+  fexp_app_capture_tools?: Scalars['String']['input'];
   fexp_block_tools: Scalars['String']['input'];
   fexp_name: Scalars['String']['input'];
   fexp_python_kernel: Scalars['String']['input'];
   fexp_system_prompt: Scalars['String']['input'];
 };
 
+export type FMarketplaceInstallOutput = {
+  __typename?: 'FMarketplaceInstallOutput';
+  persona_id: Scalars['String']['output'];
+};
+
+export type FMarketplaceOutput = {
+  __typename?: 'FMarketplaceOutput';
+  available_ws_id?: Maybe;
+  marketable_description: Scalars['String']['output'];
+  marketable_name: Scalars['String']['output'];
+  marketable_picture_big?: Maybe;
+  marketable_picture_small?: Maybe;
+  marketable_popularity_counter: Scalars['Int']['output'];
+  marketable_price: Scalars['Int']['output'];
+  marketable_star_event: Scalars['Int']['output'];
+  marketable_star_sum: Scalars['Int']['output'];
+  marketable_title1: Scalars['String']['output'];
+  marketable_title2: Scalars['String']['output'];
+  marketable_version: Scalars['String']['output'];
+  seller_fuser_id?: Maybe;
+};
+
 export
type FMassInvitationOutput = { __typename?: 'FMassInvitationOutput'; fuser_id: Scalars['String']['output']; result: Scalars['String']['output']; }; +export type FMcpServerInput = { + located_fgroup_id: Scalars['String']['input']; + mcp_command: Scalars['String']['input']; + mcp_description?: Scalars['String']['input']; + mcp_enabled?: Scalars['Boolean']['input']; + mcp_env_vars?: InputMaybe; + mcp_name: Scalars['String']['input']; +}; + +export type FMcpServerOutput = { + __typename?: 'FMcpServerOutput'; + located_fgroup_id: Scalars['String']['output']; + mcp_command: Scalars['String']['output']; + mcp_created_ts: Scalars['Float']['output']; + mcp_description: Scalars['String']['output']; + mcp_enabled: Scalars['Boolean']['output']; + mcp_env_vars?: Maybe; + mcp_id: Scalars['String']['output']; + mcp_modified_ts: Scalars['Float']['output']; + mcp_name: Scalars['String']['output']; + owner_fuser_id: Scalars['String']['output']; + owner_shared: Scalars['Boolean']['output']; +}; + +export type FMcpServerPatch = { + located_fgroup_id?: InputMaybe; + mcp_command?: InputMaybe; + mcp_description?: InputMaybe; + mcp_enabled?: InputMaybe; + mcp_env_vars?: InputMaybe; + mcp_name?: InputMaybe; + owner_shared?: InputMaybe; +}; + +export type FMcpServerSubs = { + __typename?: 'FMcpServerSubs'; + news_action: Scalars['String']['output']; + news_payload?: Maybe; + news_payload_id: Scalars['String']['output']; + news_pubsub: Scalars['String']['output']; +}; + export type FModelItem = { __typename?: 'FModelItem'; provm_name: Scalars['String']['output']; @@ -269,6 +335,12 @@ export type FPermissionSubs = { news_pubsub: Scalars['String']['output']; }; +export type FPersonaHistoryItemOutput = { + __typename?: 'FPersonaHistoryItemOutput'; + ft_id: Scalars['String']['output']; + title: Scalars['String']['output']; +}; + export type FPersonaInput = { located_fgroup_id: Scalars['String']['input']; persona_discounts?: InputMaybe; @@ -280,8 +352,8 @@ export type FPersonaInput = { export type FPersonaKanbanSubs = { __typename?: 'FPersonaKanbanSubs'; + bucket: Scalars['String']['output']; news_action: Scalars['String']['output']; - news_bucket: Scalars['String']['output']; news_payload_id: Scalars['String']['output']; news_payload_task?: Maybe; }; @@ -305,7 +377,12 @@ export type FPersonaKanbanTaskOutput = { export type FPersonaOutput = { __typename?: 'FPersonaOutput'; + history?: Maybe>; + latest_ft_id?: Maybe; located_fgroup_id: Scalars['String']['output']; + marketable_docker_image?: Maybe; + marketable_run_this?: Maybe; + marketable_setup_default?: Maybe; owner_fuser_id: Scalars['String']['output']; persona_archived_ts: Scalars['Float']['output']; persona_created_ts: Scalars['Float']['output']; @@ -315,6 +392,8 @@ export type FPersonaOutput = { persona_marketable_name: Scalars['String']['output']; persona_marketable_version: Scalars['String']['output']; persona_name: Scalars['String']['output']; + persona_picture_big?: Maybe; + persona_picture_small?: Maybe; persona_setup: Scalars['JSON']['output']; }; @@ -335,13 +414,6 @@ export type FPersonaSubs = { news_pubsub: Scalars['String']['output']; }; -export type FPluginOutput = { - __typename?: 'FPluginOutput'; - plugin_name: Scalars['String']['output']; - plugin_setup_page: Scalars['String']['output']; - plugin_version: Scalars['String']['output']; -}; - export type FStatsAddInput = { fgroup_id?: Scalars['String']['input']; st_chart: Scalars['Int']['input']; @@ -428,12 +500,6 @@ export type FThreadMessageSubs = { stream_delta?: Maybe; }; -export type 
FThreadMessagesCreateResult = { - __typename?: 'FThreadMessagesCreateResult'; - count: Scalars['Int']['output']; - messages: Array; -}; - export type FThreadMultipleMessagesInput = { ftm_belongs_to_ft_id: Scalars['String']['input']; messages: Array; @@ -553,6 +619,7 @@ export type Mutation = { bot_activate: FThreadOutput; bot_arrange_kanban_situation: Scalars['Boolean']['output']; bot_install_from_marketplace: Scalars['Boolean']['output']; + bot_kanban_post_into_inbox: Scalars['Boolean']['output']; cloudtool_post_result: Scalars['Boolean']['output']; create_captured_thread: FThreadOutput; email_confirm: EmailConfirmResult; @@ -574,7 +641,12 @@ export type Mutation = { knowledge_item_mass_group_patch: Scalars['Int']['output']; knowledge_item_patch: FKnowledgeItemOutput; make_sure_have_expert: Scalars['String']['output']; + marketplace_install: FMarketplaceInstallOutput; + marketplace_upgrade: Scalars['Boolean']['output']; marketplace_upsert_dev_bot: FBotInstallOutput; + mcp_server_create: FMcpServerOutput; + mcp_server_delete: Scalars['Boolean']['output']; + mcp_server_patch: FMcpServerOutput; password_change: Scalars['Boolean']['output']; permission_delete: Scalars['Boolean']['output']; permission_patch: FPermissionOutput; @@ -586,16 +658,14 @@ export type Mutation = { session_open: Scalars['String']['output']; session_renew: Scalars['String']['output']; stats_add: Scalars['Boolean']['output']; - tech_support_activate: Scalars['Boolean']['output']; - tech_support_set_config: Scalars['Boolean']['output']; + thread_app_capture_patch: Scalars['Boolean']['output']; thread_clear_confirmation: Scalars['Boolean']['output']; thread_create: FThreadOutput; thread_delete: Scalars['Boolean']['output']; thread_lock: Scalars['Boolean']['output']; thread_mass_group_patch: Scalars['Int']['output']; - thread_messages_create_multiple: FThreadMessagesCreateResult; + thread_messages_create_multiple: Scalars['Int']['output']; thread_patch: FThreadOutput; - thread_provide_toolset: Scalars['Boolean']['output']; thread_reset_error: Scalars['Boolean']['output']; thread_reset_title: Scalars['Boolean']['output']; thread_set_confirmation_request: Scalars['Boolean']['output']; @@ -642,6 +712,14 @@ export type MutationBot_Install_From_MarketplaceArgs = { }; +export type MutationBot_Kanban_Post_Into_InboxArgs = { + budget: Scalars['Int']['input']; + details_json: Scalars['String']['input']; + persona_id: Scalars['String']['input']; + title: Scalars['String']['input']; +}; + + export type MutationCloudtool_Post_ResultArgs = { input: CloudtoolResultInput; }; @@ -760,21 +838,54 @@ export type MutationMake_Sure_Have_ExpertArgs = { }; +export type MutationMarketplace_InstallArgs = { + fgroup_id: Scalars['String']['input']; + marketable_name: Scalars['String']['input']; +}; + + +export type MutationMarketplace_UpgradeArgs = { + fgroup_id: Scalars['String']['input']; + marketable_name: Scalars['String']['input']; + specific_version: Scalars['String']['input']; +}; + + export type MutationMarketplace_Upsert_Dev_BotArgs = { marketable_description: Scalars['String']['input']; - marketable_expert_scheduled: FMarketplaceExpertInput; - marketable_expert_setup: FMarketplaceExpertInput; - marketable_expert_subchat: FMarketplaceExpertInput; - marketable_expert_todo: FMarketplaceExpertInput; + marketable_expert_default: FMarketplaceExpertInput; + marketable_expert_setup?: InputMaybe; + marketable_expert_subchat?: InputMaybe; + marketable_expert_todo?: InputMaybe; marketable_github_repo: Scalars['String']['input']; marketable_name: 
Scalars['String']['input']; + marketable_picture_big_b64?: InputMaybe; + marketable_picture_small_b64?: InputMaybe; marketable_run_this: Scalars['String']['input']; marketable_setup_default: Scalars['String']['input']; + marketable_title1: Scalars['String']['input']; + marketable_title2: Scalars['String']['input']; marketable_version: Scalars['String']['input']; ws_id: Scalars['String']['input']; }; +export type MutationMcp_Server_CreateArgs = { + input: FMcpServerInput; +}; + + +export type MutationMcp_Server_DeleteArgs = { + id: Scalars['String']['input']; +}; + + +export type MutationMcp_Server_PatchArgs = { + id: Scalars['String']['input']; + patch: FMcpServerPatch; +}; + + export type MutationPassword_ChangeArgs = { new_password: Scalars['String']['input']; old_password: Scalars['String']['input']; @@ -832,14 +943,10 @@ export type MutationStats_AddArgs = { }; -export type MutationTech_Support_ActivateArgs = { - ws_id: Scalars['String']['input']; -}; - - -export type MutationTech_Support_Set_ConfigArgs = { - config: TechSupportSettingsInput; - ws_id: Scalars['String']['input']; +export type MutationThread_App_Capture_PatchArgs = { + ft_app_searchable?: InputMaybe; + ft_app_specific?: InputMaybe; + ft_id: Scalars['String']['input']; }; @@ -871,6 +978,7 @@ export type MutationThread_Mass_Group_PatchArgs = { export type MutationThread_Messages_Create_MultipleArgs = { + delete_negative?: InputMaybe>; input: FThreadMultipleMessagesInput; }; @@ -881,12 +989,6 @@ export type MutationThread_PatchArgs = { }; -export type MutationThread_Provide_ToolsetArgs = { - ft_id: Scalars['String']['input']; - toolset: Scalars['String']['input']; -}; - - export type MutationThread_Reset_ErrorArgs = { ft_error: Scalars['String']['input']; ft_id: Scalars['String']['input']; @@ -953,8 +1055,7 @@ export type Query = { api_key_list: Array; audit_list: Array; cloud_tools_list: Array; - expert_choice_consequences: Array; - expert_choice_consequences2: FExpertChoiceConsequences; + expert_choice_consequences: FExpertChoiceConsequences; expert_get: FExpertOutput; expert_list: Array; experts_effective_list: Array; @@ -967,15 +1068,19 @@ export type Query = { knowledge_item_get: FKnowledgeItemOutput; knowledge_item_list: Array; knowledge_vecdb_search: Array; + marketplace_details: Array; + marketplace_list: Array; + marketplace_search: Array; + mcp_server_get: FMcpServerOutput; + mcp_server_list: Array; permission_list: Array; persona_get: FPersonaOutput; persona_list: Array; - plugins_installed: Array; + persona_opened_in_ui: FPersonaOutput; query_basic_stuff: BasicStuffResult; reset_password_token_info: PasswordResetTokenInfo; stats_query: Array; stats_query_distinct: StatsDistinctOutput; - tech_support_get_config?: Maybe; thread_get: FThreadOutput; thread_list: Array; thread_messages_list: Array; @@ -1005,12 +1110,6 @@ export type QueryExpert_Choice_ConsequencesArgs = { }; -export type QueryExpert_Choice_Consequences2Args = { - fexp_id: Scalars['String']['input']; - inside_fgroup_id: Scalars['String']['input']; -}; - - export type QueryExpert_GetArgs = { id: Scalars['String']['input']; }; @@ -1082,6 +1181,38 @@ export type QueryKnowledge_Vecdb_SearchArgs = { }; +export type QueryMarketplace_DetailsArgs = { + fgroup_id: Scalars['String']['input']; + marketable_name: Scalars['String']['input']; +}; + + +export type QueryMarketplace_ListArgs = { + fgroup_id: Scalars['String']['input']; + take?: Scalars['Int']['input']; +}; + + +export type QueryMarketplace_SearchArgs = { + fgroup_id: Scalars['String']['input']; + 
query: Scalars['String']['input']; + take?: Scalars['Int']['input']; +}; + + +export type QueryMcp_Server_GetArgs = { + id: Scalars['String']['input']; +}; + + +export type QueryMcp_Server_ListArgs = { + limit: Scalars['Int']['input']; + located_fgroup_id: Scalars['String']['input']; + skip: Scalars['Int']['input']; + sort_by?: Scalars['String']['input']; +}; + + export type QueryPermission_ListArgs = { fgroup_id: Scalars['String']['input']; }; @@ -1100,6 +1231,11 @@ export type QueryPersona_ListArgs = { }; +export type QueryPersona_Opened_In_UiArgs = { + persona_id: Scalars['String']['input']; +}; + + export type QueryQuery_Basic_StuffArgs = { want_invitations?: Scalars['Boolean']['input']; }; @@ -1140,11 +1276,6 @@ export type QueryStats_Query_DistinctArgs = { }; -export type QueryTech_Support_Get_ConfigArgs = { - ws_id: Scalars['String']['input']; -}; - - export type QueryThread_GetArgs = { id: Scalars['String']['input']; }; @@ -1202,6 +1333,7 @@ export type Subscription = { experts_in_group: FExpertSubs; external_data_sources_in_group: FExternalDataSourceSubs; knowledge_items_in_group: FKnowledgeItemSubs; + mcp_servers_in_group: FMcpServerSubs; permissions_in_group_subs: FPermissionSubs; persona_kanban_subs: FPersonaKanbanSubs; personas_in_group: FPersonaSubs; @@ -1245,6 +1377,14 @@ export type SubscriptionKnowledge_Items_In_GroupArgs = { }; +export type SubscriptionMcp_Servers_In_GroupArgs = { + filter?: Array; + limit?: Scalars['Int']['input']; + located_fgroup_id: Scalars['String']['input']; + sort_by?: Array; +}; + + export type SubscriptionPermissions_In_Group_SubsArgs = { fgroup_id: Scalars['String']['input']; limit: Scalars['Int']['input']; @@ -1280,23 +1420,6 @@ export type SubscriptionTree_SubscriptionArgs = { ws_id: Scalars['String']['input']; }; -export type TechSupportSettingsInput = { - support_api_key: Scalars['String']['input']; - support_channel_list: Array; - support_discord_key: Scalars['String']['input']; - support_fgroup_id: Scalars['String']['input']; - support_fuser_id: Scalars['String']['input']; -}; - -export type TechSupportSettingsOutput = { - __typename?: 'TechSupportSettingsOutput'; - support_api_key: Scalars['String']['output']; - support_channel_list: Array; - support_discord_key: Scalars['String']['output']; - support_fgroup_id: Scalars['String']['output']; - support_fuser_id: Scalars['String']['output']; -}; - export type TreeUpdateSubs = { __typename?: 'TreeUpdateSubs'; treeupd_action: Scalars['String']['output']; @@ -1343,7 +1466,7 @@ export type MessageCreateMultipleMutationVariables = Exact<{ }>; -export type MessageCreateMultipleMutation = { __typename?: 'Mutation', thread_messages_create_multiple: { __typename?: 'FThreadMessagesCreateResult', count: number } }; +export type MessageCreateMultipleMutation = { __typename?: 'Mutation', thread_messages_create_multiple: number }; export type ThreadPatchMutationVariables = Exact<{ id: Scalars['String']['input']; @@ -1366,7 +1489,7 @@ export type ModelsForExpertQueryVariables = Exact<{ }>; -export type ModelsForExpertQuery = { __typename?: 'Query', expert_choice_consequences: Array<{ __typename?: 'FModelItem', provm_name: string }> }; +export type ModelsForExpertQuery = { __typename?: 'Query', expert_choice_consequences: { __typename?: 'FExpertChoiceConsequences', models: Array<{ __typename?: 'FModelItem', provm_name: string }> } }; export type ToolsForGroupQueryVariables = Exact<{ located_fgroup_id: Scalars['String']['input']; @@ -1408,10 +1531,10 @@ export const ThreadsPageSubsDocument = 
{"kind":"Document","definitions":[{"kind" export const DeleteThreadDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"DeleteThread"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_delete"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}}]}]}}]} as unknown as DocumentNode; export const CreateThreadDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"CreateThread"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"FThreadInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_create"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ft_id"}}]}}]}}]} as unknown as DocumentNode; export const MessagesSubscriptionDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"subscription","name":{"kind":"Name","value":"MessagesSubscription"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"want_deltas"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"Boolean"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"comprehensive_thread_subs"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"ft_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"want_deltas"},"value":{"kind":"Variable","name":{"kind":"Name","value":"want_deltas"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"news_action"}},{"kind":"Field","name":{"kind":"Name","value":"news_payload_id"}},{"kind":"Field","name":{"kind":"Name","value":"news_payload_thread_message"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ft_app_specific"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_belongs_to_ft_id"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_alt"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_num"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_prev_alt"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_role"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_content"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_tool_calls"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_call_id"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_usage"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_created_ts"}},{"kind":"Field","name":{"ki
nd":"Name","value":"ftm_user_preferences"}}]}},{"kind":"Field","name":{"kind":"Name","value":"stream_delta"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ftm_role"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_content"}}]}},{"kind":"Field","name":{"kind":"Name","value":"news_payload_thread"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"located_fgroup_id"}},{"kind":"Field","name":{"kind":"Name","value":"ft_id"}},{"kind":"Field","name":{"kind":"Name","value":"ft_need_user"}},{"kind":"Field","name":{"kind":"Name","value":"ft_need_assistant"}},{"kind":"Field","name":{"kind":"Name","value":"ft_fexp_id"}},{"kind":"Field","name":{"kind":"Name","value":"ft_confirmation_request"}},{"kind":"Field","name":{"kind":"Name","value":"ft_confirmation_response"}},{"kind":"Field","name":{"kind":"Name","value":"ft_title"}}]}}]}}]}}]} as unknown as DocumentNode; -export const MessageCreateMultipleDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"MessageCreateMultiple"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"FThreadMultipleMessagesInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_messages_create_multiple"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"count"}}]}}]}}]} as unknown as DocumentNode; +export const MessageCreateMultipleDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"MessageCreateMultiple"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"FThreadMultipleMessagesInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_messages_create_multiple"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}]}]}}]} as unknown as DocumentNode; export const ThreadPatchDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"ThreadPatch"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"message"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_patch"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}},{"kind":"Argument","name":{"kind":"Name","value":"patch"},"value":{"kind":"ObjectValue","fields":[{"kind":"ObjectField","name":{"kind":"Name","value":"ft_error"},"value":{"kind":"Variable","name":{"kind":"Name","value":"message"}}}]}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ft_id"}}]}}]}}]} as unknown as DocumentNode; export const ExpertsForGroupDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ExpertsForGroup"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"experts_effective_list"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"located_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"fexp_id"}},{"kind":"Field","name":{"kind":"Name","value":"fexp_name"}}]}}]}}]} as unknown as DocumentNode; -export const ModelsForExpertDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ModelsForExpert"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"expert_choice_consequences"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"fexp_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"inside_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"provm_name"}}]}}]}}]} as unknown as DocumentNode; +export const ModelsForExpertDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ModelsForExpert"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"expert_choice_consequences"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"fexp_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"inside_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"models"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"provm_name"}}]}}]}}]}}]} as unknown as DocumentNode; export const ToolsForGroupDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ToolsForGroup"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"cloud_tools_list"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"located_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"include_offline"},"value":{"kind":"BooleanValue","value":false}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ctool_confirmed_exists_ts"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_description"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_id"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_name"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_parameters"}},{"kind":"Field","name":{"kind":"Name","value":"located_fgroup_id"}},{"kind":"Field","name":{"kind":"Name","value":"owner_fuser_id"}}]}}]}}]} as unknown as DocumentNode; export const ThreadConfirmationResponseDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"ThreadConfirmationResponse"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"confirmation_response"}},"type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}},"defaultValue":{"kind":"StringValue","value":"","block":false}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}},"type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}},"defaultValue":{"kind":"StringValue","value":"","block":false}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_set_confirmation_response"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"ft_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"confirmation_response"},"value":{"kind":"Variable","name":{"kind":"Name","value":"confirmation_response"}}}]}]}}]} as unknown as DocumentNode; export const BasicStuffDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BasicStuff"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"query_basic_stuff"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"fuser_id"}},{"kind":"Field","name":{"kind":"Name","value":"my_own_ws_id"}},{"kind":"Field","name":{"kind":"Name","value":"workspaces"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ws_id"}},{"kind":"Field","name":{"kind":"Name","value":"ws_owner_fuser_id"}},{"kind":"Field","name":{"kind":"Name","value":"ws_root_group_id"}},{"kind":"Field","name":{"kind":"Name","value":"root_group_name"}},{"kind":"Field","name":{"kind":"Name","value":"have_coins_exactly"}},{"kind":"Field","name":{"kind":"Name","value":"have_coins_enough"}},{"kind":"Field","name":{"kind":"Name","value":"have_admin"}}]}}]}}]}}]} as unknown as DocumentNode; @@ -1431,10 +1554,8 @@ export const definedNonNullAnySchema = z.any().refine((v) => isDefinedNonNullAny export function CloudtoolResultInputSchema(): z.ZodObject> { return z.object({ dollars: z.number().default(0), - ft_id: z.string(), - ftm_alt: z.number(), + fcall_id: z.string(), ftm_content: z.string(), - ftm_num: z.number(), ftm_provenance: z.string() }) } @@ -1442,6 +1563,7 @@ export function CloudtoolResultInputSchema(): z.ZodObject> { return z.object({ fexp_allow_tools: z.string(), + fexp_app_capture_tools: z.string().default("null"), fexp_block_tools: z.string(), fexp_name: z.string(), fexp_python_kernel: z.string(), @@ -1479,6 +1601,7 @@ export function FExternalDataSourcePatchSchema(): z.ZodObject> { return z.object({ + details_json: z.string().nullish(), state: z.string(), title: z.string() }) @@ -1507,6 +1630,7 @@ export function FKnowledgeItemPatchSchema(): z.ZodObject> { return z.object({ fexp_allow_tools: z.string(), + fexp_app_capture_tools: z.string().default(""), fexp_block_tools: z.string(), fexp_name: z.string(), fexp_python_kernel: z.string(), @@ -1514,6 +1638,29 @@ export function FMarketplaceExpertInputSchema(): z.ZodObject> { + return z.object({ + located_fgroup_id: z.string(), + mcp_command: z.string(), + mcp_description: z.string().default(""), + mcp_enabled: z.boolean().default(false), + mcp_env_vars: 
definedNonNullAnySchema.nullish(), + mcp_name: z.string() + }) +} + +export function FMcpServerPatchSchema(): z.ZodObject> { + return z.object({ + located_fgroup_id: z.string().nullish(), + mcp_command: z.string().nullish(), + mcp_description: z.string().nullish(), + mcp_enabled: z.boolean().nullish(), + mcp_env_vars: definedNonNullAnySchema.nullish(), + mcp_name: z.string().nullish(), + owner_shared: z.boolean().nullish() + }) +} + export function FPermissionPatchSchema(): z.ZodObject> { return z.object({ perm_roles: z.number() @@ -1645,13 +1792,3 @@ export function RegisterInputSchema(): z.ZodObject> { username: z.string() }) } - -export function TechSupportSettingsInputSchema(): z.ZodObject> { - return z.object({ - support_api_key: z.string(), - support_channel_list: z.array(z.string()), - support_discord_key: z.string(), - support_fgroup_id: z.string(), - support_fuser_id: z.string() - }) -} diff --git a/refact-agent/gui/generated/graphql/gql.ts b/refact-agent/gui/generated/graphql/gql.ts index 88cc1928b..d3a1ac1d2 100644 --- a/refact-agent/gui/generated/graphql/gql.ts +++ b/refact-agent/gui/generated/graphql/gql.ts @@ -14,10 +14,10 @@ import type { TypedDocumentNode as DocumentNode } from '@graphql-typed-document- * Learn more about it here: https://the-guild.dev/graphql/codegen/plugins/presets/preset-client#reducing-bundle-size */ type Documents = { - "subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input) {\n count\n }\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n provm_name\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}": typeof types.ThreadsPageSubsDocument, + "subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input)\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n models {\n provm_name\n }\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}": typeof types.ThreadsPageSubsDocument, }; const documents: Documents = { - "subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input) {\n count\n }\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n provm_name\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}": types.ThreadsPageSubsDocument, + "subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input)\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n models {\n provm_name\n }\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}": types.ThreadsPageSubsDocument, }; /** @@ -37,7 +37,7 @@ export function graphql(source: string): unknown; /** * The graphql function is used to parse GraphQL queries into a document that can be used by GraphQL clients. */ -export function graphql(source: "subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input) {\n count\n }\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n provm_name\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}"): (typeof documents)["subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input) {\n count\n }\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n provm_name\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}"]; +export function graphql(source: "subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input)\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n models {\n provm_name\n }\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}"): (typeof documents)["subscription ThreadsPageSubs($located_fgroup_id: String!, $limit: Int!) {\n threads_in_group(located_fgroup_id: $located_fgroup_id, limit: $limit) {\n news_action\n news_payload_id\n news_payload {\n owner_fuser_id\n owner_shared\n ft_id\n ft_title\n ft_error\n ft_updated_ts\n ft_locked_by\n ft_need_assistant\n ft_need_tool_calls\n ft_archived_ts\n ft_created_ts\n }\n }\n}\n\nmutation DeleteThread($id: String!) {\n thread_delete(id: $id)\n}\n\nmutation CreateThread($input: FThreadInput!) {\n thread_create(input: $input) {\n ft_id\n }\n}\n\nsubscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) {\n comprehensive_thread_subs(ft_id: $ft_id, want_deltas: $want_deltas) {\n news_action\n news_payload_id\n news_payload_thread_message {\n ft_app_specific\n ftm_belongs_to_ft_id\n ftm_alt\n ftm_num\n ftm_prev_alt\n ftm_role\n ftm_content\n ftm_tool_calls\n ftm_call_id\n ftm_usage\n ftm_created_ts\n ftm_user_preferences\n }\n stream_delta {\n ftm_role\n ftm_content\n }\n news_payload_thread {\n located_fgroup_id\n ft_id\n ft_need_user\n ft_need_assistant\n ft_fexp_id\n ft_confirmation_request\n ft_confirmation_response\n ft_title\n }\n }\n}\n\nmutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) {\n thread_messages_create_multiple(input: $input)\n}\n\nmutation ThreadPatch($id: String!, $message: String!) {\n thread_patch(id: $id, patch: {ft_error: $message}) {\n ft_id\n }\n}\n\nquery ExpertsForGroup($located_fgroup_id: String!) {\n experts_effective_list(located_fgroup_id: $located_fgroup_id) {\n fexp_id\n fexp_name\n }\n}\n\nquery ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) {\n expert_choice_consequences(\n fexp_id: $fexp_id\n inside_fgroup_id: $inside_fgroup_id\n ) {\n models {\n provm_name\n }\n }\n}\n\nquery ToolsForGroup($located_fgroup_id: String!) 
{\n cloud_tools_list(located_fgroup_id: $located_fgroup_id, include_offline: false) {\n ctool_confirmed_exists_ts\n ctool_description\n ctool_id\n ctool_name\n ctool_parameters\n located_fgroup_id\n owner_fuser_id\n }\n}\n\nmutation ThreadConfirmationResponse($confirmation_response: String = \"\", $ft_id: String = \"\") {\n thread_set_confirmation_response(\n ft_id: $ft_id\n confirmation_response: $confirmation_response\n )\n}\n\nquery BasicStuff {\n query_basic_stuff {\n fuser_id\n my_own_ws_id\n workspaces {\n ws_id\n ws_owner_fuser_id\n ws_root_group_id\n root_group_name\n have_coins_exactly\n have_coins_enough\n have_admin\n }\n }\n}\n\nmutation CreateWorkSpaceGroup($fgroup_name: String!, $fgroup_parent_id: String!) {\n group_create(\n input: {fgroup_name: $fgroup_name, fgroup_parent_id: $fgroup_parent_id}\n ) {\n fgroup_id\n fgroup_name\n ws_id\n fgroup_parent_id\n fgroup_created_ts\n }\n}\n\nsubscription WorkspaceTree($ws_id: String!) {\n tree_subscription(ws_id: $ws_id) {\n treeupd_action\n treeupd_id\n treeupd_path\n treeupd_type\n treeupd_title\n }\n}"]; export function graphql(source: string) { return (documents as any)[source] ?? {}; diff --git a/refact-agent/gui/generated/graphql/graphql.ts b/refact-agent/gui/generated/graphql/graphql.ts index 5371f18ad..20a8ca809 100644 --- a/refact-agent/gui/generated/graphql/graphql.ts +++ b/refact-agent/gui/generated/graphql/graphql.ts @@ -32,10 +32,8 @@ export type BasicStuffResult = { export type CloudtoolResultInput = { dollars?: Scalars['Float']['input']; - ft_id: Scalars['String']['input']; - ftm_alt: Scalars['Int']['input']; + fcall_id: Scalars['String']['input']; ftm_content: Scalars['String']['input']; - ftm_num: Scalars['Int']['input']; ftm_provenance: Scalars['String']['input']; }; @@ -115,6 +113,7 @@ export type FExpertChoiceConsequences = { export type FExpertInput = { fexp_allow_tools: Scalars['String']['input']; + fexp_app_capture_tools?: Scalars['String']['input']; fexp_block_tools: Scalars['String']['input']; fexp_name: Scalars['String']['input']; fexp_python_kernel: Scalars['String']['input']; @@ -127,6 +126,7 @@ export type FExpertInput = { export type FExpertOutput = { __typename?: 'FExpertOutput'; fexp_allow_tools: Scalars['String']['output']; + fexp_app_capture_tools?: Maybe; fexp_block_tools: Scalars['String']['output']; fexp_id: Scalars['String']['output']; fexp_name: Scalars['String']['output']; @@ -186,6 +186,7 @@ export type FExternalDataSourceSubs = { }; export type FKanbanTaskInput = { + details_json?: InputMaybe; state: Scalars['String']['input']; title: Scalars['String']['input']; }; @@ -235,18 +236,83 @@ export type FKnowledgeItemSubs = { export type FMarketplaceExpertInput = { fexp_allow_tools: Scalars['String']['input']; + fexp_app_capture_tools?: Scalars['String']['input']; fexp_block_tools: Scalars['String']['input']; fexp_name: Scalars['String']['input']; fexp_python_kernel: Scalars['String']['input']; fexp_system_prompt: Scalars['String']['input']; }; +export type FMarketplaceInstallOutput = { + __typename?: 'FMarketplaceInstallOutput'; + persona_id: Scalars['String']['output']; +}; + +export type FMarketplaceOutput = { + __typename?: 'FMarketplaceOutput'; + available_ws_id?: Maybe; + marketable_description: Scalars['String']['output']; + marketable_name: Scalars['String']['output']; + marketable_picture_big?: Maybe; + marketable_picture_small?: Maybe; + marketable_popularity_counter: Scalars['Int']['output']; + marketable_price: Scalars['Int']['output']; + marketable_star_event: 
Scalars['Int']['output']; + marketable_star_sum: Scalars['Int']['output']; + marketable_title1: Scalars['String']['output']; + marketable_title2: Scalars['String']['output']; + marketable_version: Scalars['String']['output']; + seller_fuser_id?: Maybe; +}; + export type FMassInvitationOutput = { __typename?: 'FMassInvitationOutput'; fuser_id: Scalars['String']['output']; result: Scalars['String']['output']; }; +export type FMcpServerInput = { + located_fgroup_id: Scalars['String']['input']; + mcp_command: Scalars['String']['input']; + mcp_description?: Scalars['String']['input']; + mcp_enabled?: Scalars['Boolean']['input']; + mcp_env_vars?: InputMaybe; + mcp_name: Scalars['String']['input']; +}; + +export type FMcpServerOutput = { + __typename?: 'FMcpServerOutput'; + located_fgroup_id: Scalars['String']['output']; + mcp_command: Scalars['String']['output']; + mcp_created_ts: Scalars['Float']['output']; + mcp_description: Scalars['String']['output']; + mcp_enabled: Scalars['Boolean']['output']; + mcp_env_vars?: Maybe; + mcp_id: Scalars['String']['output']; + mcp_modified_ts: Scalars['Float']['output']; + mcp_name: Scalars['String']['output']; + owner_fuser_id: Scalars['String']['output']; + owner_shared: Scalars['Boolean']['output']; +}; + +export type FMcpServerPatch = { + located_fgroup_id?: InputMaybe; + mcp_command?: InputMaybe; + mcp_description?: InputMaybe; + mcp_enabled?: InputMaybe; + mcp_env_vars?: InputMaybe; + mcp_name?: InputMaybe; + owner_shared?: InputMaybe; +}; + +export type FMcpServerSubs = { + __typename?: 'FMcpServerSubs'; + news_action: Scalars['String']['output']; + news_payload?: Maybe; + news_payload_id: Scalars['String']['output']; + news_pubsub: Scalars['String']['output']; +}; + export type FModelItem = { __typename?: 'FModelItem'; provm_name: Scalars['String']['output']; @@ -271,6 +337,12 @@ export type FPermissionSubs = { news_pubsub: Scalars['String']['output']; }; +export type FPersonaHistoryItemOutput = { + __typename?: 'FPersonaHistoryItemOutput'; + ft_id: Scalars['String']['output']; + title: Scalars['String']['output']; +}; + export type FPersonaInput = { located_fgroup_id: Scalars['String']['input']; persona_discounts?: InputMaybe; @@ -282,8 +354,8 @@ export type FPersonaInput = { export type FPersonaKanbanSubs = { __typename?: 'FPersonaKanbanSubs'; + bucket: Scalars['String']['output']; news_action: Scalars['String']['output']; - news_bucket: Scalars['String']['output']; news_payload_id: Scalars['String']['output']; news_payload_task?: Maybe; }; @@ -307,7 +379,12 @@ export type FPersonaKanbanTaskOutput = { export type FPersonaOutput = { __typename?: 'FPersonaOutput'; + history?: Maybe>; + latest_ft_id?: Maybe; located_fgroup_id: Scalars['String']['output']; + marketable_docker_image?: Maybe; + marketable_run_this?: Maybe; + marketable_setup_default?: Maybe; owner_fuser_id: Scalars['String']['output']; persona_archived_ts: Scalars['Float']['output']; persona_created_ts: Scalars['Float']['output']; @@ -317,6 +394,8 @@ export type FPersonaOutput = { persona_marketable_name: Scalars['String']['output']; persona_marketable_version: Scalars['String']['output']; persona_name: Scalars['String']['output']; + persona_picture_big?: Maybe; + persona_picture_small?: Maybe; persona_setup: Scalars['JSON']['output']; }; @@ -337,13 +416,6 @@ export type FPersonaSubs = { news_pubsub: Scalars['String']['output']; }; -export type FPluginOutput = { - __typename?: 'FPluginOutput'; - plugin_name: Scalars['String']['output']; - plugin_setup_page: Scalars['String']['output']; - 
plugin_version: Scalars['String']['output']; -}; - export type FStatsAddInput = { fgroup_id?: Scalars['String']['input']; st_chart: Scalars['Int']['input']; @@ -430,12 +502,6 @@ export type FThreadMessageSubs = { stream_delta?: Maybe; }; -export type FThreadMessagesCreateResult = { - __typename?: 'FThreadMessagesCreateResult'; - count: Scalars['Int']['output']; - messages: Array; -}; - export type FThreadMultipleMessagesInput = { ftm_belongs_to_ft_id: Scalars['String']['input']; messages: Array; @@ -555,6 +621,7 @@ export type Mutation = { bot_activate: FThreadOutput; bot_arrange_kanban_situation: Scalars['Boolean']['output']; bot_install_from_marketplace: Scalars['Boolean']['output']; + bot_kanban_post_into_inbox: Scalars['Boolean']['output']; cloudtool_post_result: Scalars['Boolean']['output']; create_captured_thread: FThreadOutput; email_confirm: EmailConfirmResult; @@ -576,7 +643,12 @@ export type Mutation = { knowledge_item_mass_group_patch: Scalars['Int']['output']; knowledge_item_patch: FKnowledgeItemOutput; make_sure_have_expert: Scalars['String']['output']; + marketplace_install: FMarketplaceInstallOutput; + marketplace_upgrade: Scalars['Boolean']['output']; marketplace_upsert_dev_bot: FBotInstallOutput; + mcp_server_create: FMcpServerOutput; + mcp_server_delete: Scalars['Boolean']['output']; + mcp_server_patch: FMcpServerOutput; password_change: Scalars['Boolean']['output']; permission_delete: Scalars['Boolean']['output']; permission_patch: FPermissionOutput; @@ -588,16 +660,14 @@ export type Mutation = { session_open: Scalars['String']['output']; session_renew: Scalars['String']['output']; stats_add: Scalars['Boolean']['output']; - tech_support_activate: Scalars['Boolean']['output']; - tech_support_set_config: Scalars['Boolean']['output']; + thread_app_capture_patch: Scalars['Boolean']['output']; thread_clear_confirmation: Scalars['Boolean']['output']; thread_create: FThreadOutput; thread_delete: Scalars['Boolean']['output']; thread_lock: Scalars['Boolean']['output']; thread_mass_group_patch: Scalars['Int']['output']; - thread_messages_create_multiple: FThreadMessagesCreateResult; + thread_messages_create_multiple: Scalars['Int']['output']; thread_patch: FThreadOutput; - thread_provide_toolset: Scalars['Boolean']['output']; thread_reset_error: Scalars['Boolean']['output']; thread_reset_title: Scalars['Boolean']['output']; thread_set_confirmation_request: Scalars['Boolean']['output']; @@ -644,6 +714,14 @@ export type MutationBot_Install_From_MarketplaceArgs = { }; +export type MutationBot_Kanban_Post_Into_InboxArgs = { + budget: Scalars['Int']['input']; + details_json: Scalars['String']['input']; + persona_id: Scalars['String']['input']; + title: Scalars['String']['input']; +}; + + export type MutationCloudtool_Post_ResultArgs = { input: CloudtoolResultInput; }; @@ -762,21 +840,54 @@ export type MutationMake_Sure_Have_ExpertArgs = { }; +export type MutationMarketplace_InstallArgs = { + fgroup_id: Scalars['String']['input']; + marketable_name: Scalars['String']['input']; +}; + + +export type MutationMarketplace_UpgradeArgs = { + fgroup_id: Scalars['String']['input']; + marketable_name: Scalars['String']['input']; + specific_version: Scalars['String']['input']; +}; + + export type MutationMarketplace_Upsert_Dev_BotArgs = { marketable_description: Scalars['String']['input']; - marketable_expert_scheduled: FMarketplaceExpertInput; - marketable_expert_setup: FMarketplaceExpertInput; - marketable_expert_subchat: FMarketplaceExpertInput; - marketable_expert_todo: 
FMarketplaceExpertInput; + marketable_expert_default: FMarketplaceExpertInput; + marketable_expert_setup?: InputMaybe; + marketable_expert_subchat?: InputMaybe; + marketable_expert_todo?: InputMaybe; marketable_github_repo: Scalars['String']['input']; marketable_name: Scalars['String']['input']; + marketable_picture_big_b64?: InputMaybe; + marketable_picture_small_b64?: InputMaybe; marketable_run_this: Scalars['String']['input']; marketable_setup_default: Scalars['String']['input']; + marketable_title1: Scalars['String']['input']; + marketable_title2: Scalars['String']['input']; marketable_version: Scalars['String']['input']; ws_id: Scalars['String']['input']; }; +export type MutationMcp_Server_CreateArgs = { + input: FMcpServerInput; +}; + + +export type MutationMcp_Server_DeleteArgs = { + id: Scalars['String']['input']; +}; + + +export type MutationMcp_Server_PatchArgs = { + id: Scalars['String']['input']; + patch: FMcpServerPatch; +}; + + export type MutationPassword_ChangeArgs = { new_password: Scalars['String']['input']; old_password: Scalars['String']['input']; @@ -834,14 +945,10 @@ export type MutationStats_AddArgs = { }; -export type MutationTech_Support_ActivateArgs = { - ws_id: Scalars['String']['input']; -}; - - -export type MutationTech_Support_Set_ConfigArgs = { - config: TechSupportSettingsInput; - ws_id: Scalars['String']['input']; +export type MutationThread_App_Capture_PatchArgs = { + ft_app_searchable?: InputMaybe; + ft_app_specific?: InputMaybe; + ft_id: Scalars['String']['input']; }; @@ -873,6 +980,7 @@ export type MutationThread_Mass_Group_PatchArgs = { export type MutationThread_Messages_Create_MultipleArgs = { + delete_negative?: InputMaybe>; input: FThreadMultipleMessagesInput; }; @@ -883,12 +991,6 @@ export type MutationThread_PatchArgs = { }; -export type MutationThread_Provide_ToolsetArgs = { - ft_id: Scalars['String']['input']; - toolset: Scalars['String']['input']; -}; - - export type MutationThread_Reset_ErrorArgs = { ft_error: Scalars['String']['input']; ft_id: Scalars['String']['input']; @@ -955,8 +1057,7 @@ export type Query = { api_key_list: Array; audit_list: Array; cloud_tools_list: Array; - expert_choice_consequences: Array; - expert_choice_consequences2: FExpertChoiceConsequences; + expert_choice_consequences: FExpertChoiceConsequences; expert_get: FExpertOutput; expert_list: Array; experts_effective_list: Array; @@ -969,15 +1070,19 @@ export type Query = { knowledge_item_get: FKnowledgeItemOutput; knowledge_item_list: Array; knowledge_vecdb_search: Array; + marketplace_details: Array; + marketplace_list: Array; + marketplace_search: Array; + mcp_server_get: FMcpServerOutput; + mcp_server_list: Array; permission_list: Array; persona_get: FPersonaOutput; persona_list: Array; - plugins_installed: Array; + persona_opened_in_ui: FPersonaOutput; query_basic_stuff: BasicStuffResult; reset_password_token_info: PasswordResetTokenInfo; stats_query: Array; stats_query_distinct: StatsDistinctOutput; - tech_support_get_config?: Maybe; thread_get: FThreadOutput; thread_list: Array; thread_messages_list: Array; @@ -1007,12 +1112,6 @@ export type QueryExpert_Choice_ConsequencesArgs = { }; -export type QueryExpert_Choice_Consequences2Args = { - fexp_id: Scalars['String']['input']; - inside_fgroup_id: Scalars['String']['input']; -}; - - export type QueryExpert_GetArgs = { id: Scalars['String']['input']; }; @@ -1084,6 +1183,38 @@ export type QueryKnowledge_Vecdb_SearchArgs = { }; +export type QueryMarketplace_DetailsArgs = { + fgroup_id: Scalars['String']['input']; + 
marketable_name: Scalars['String']['input']; +}; + + +export type QueryMarketplace_ListArgs = { + fgroup_id: Scalars['String']['input']; + take?: Scalars['Int']['input']; +}; + + +export type QueryMarketplace_SearchArgs = { + fgroup_id: Scalars['String']['input']; + query: Scalars['String']['input']; + take?: Scalars['Int']['input']; +}; + + +export type QueryMcp_Server_GetArgs = { + id: Scalars['String']['input']; +}; + + +export type QueryMcp_Server_ListArgs = { + limit: Scalars['Int']['input']; + located_fgroup_id: Scalars['String']['input']; + skip: Scalars['Int']['input']; + sort_by?: Scalars['String']['input']; +}; + + export type QueryPermission_ListArgs = { fgroup_id: Scalars['String']['input']; }; @@ -1102,6 +1233,11 @@ export type QueryPersona_ListArgs = { }; +export type QueryPersona_Opened_In_UiArgs = { + persona_id: Scalars['String']['input']; +}; + + export type QueryQuery_Basic_StuffArgs = { want_invitations?: Scalars['Boolean']['input']; }; @@ -1142,11 +1278,6 @@ export type QueryStats_Query_DistinctArgs = { }; -export type QueryTech_Support_Get_ConfigArgs = { - ws_id: Scalars['String']['input']; -}; - - export type QueryThread_GetArgs = { id: Scalars['String']['input']; }; @@ -1204,6 +1335,7 @@ export type Subscription = { experts_in_group: FExpertSubs; external_data_sources_in_group: FExternalDataSourceSubs; knowledge_items_in_group: FKnowledgeItemSubs; + mcp_servers_in_group: FMcpServerSubs; permissions_in_group_subs: FPermissionSubs; persona_kanban_subs: FPersonaKanbanSubs; personas_in_group: FPersonaSubs; @@ -1247,6 +1379,14 @@ export type SubscriptionKnowledge_Items_In_GroupArgs = { }; +export type SubscriptionMcp_Servers_In_GroupArgs = { + filter?: Array; + limit?: Scalars['Int']['input']; + located_fgroup_id: Scalars['String']['input']; + sort_by?: Array; +}; + + export type SubscriptionPermissions_In_Group_SubsArgs = { fgroup_id: Scalars['String']['input']; limit: Scalars['Int']['input']; @@ -1282,23 +1422,6 @@ export type SubscriptionTree_SubscriptionArgs = { ws_id: Scalars['String']['input']; }; -export type TechSupportSettingsInput = { - support_api_key: Scalars['String']['input']; - support_channel_list: Array; - support_discord_key: Scalars['String']['input']; - support_fgroup_id: Scalars['String']['input']; - support_fuser_id: Scalars['String']['input']; -}; - -export type TechSupportSettingsOutput = { - __typename?: 'TechSupportSettingsOutput'; - support_api_key: Scalars['String']['output']; - support_channel_list: Array; - support_discord_key: Scalars['String']['output']; - support_fgroup_id: Scalars['String']['output']; - support_fuser_id: Scalars['String']['output']; -}; - export type TreeUpdateSubs = { __typename?: 'TreeUpdateSubs'; treeupd_action: Scalars['String']['output']; @@ -1345,7 +1468,7 @@ export type MessageCreateMultipleMutationVariables = Exact<{ }>; -export type MessageCreateMultipleMutation = { __typename?: 'Mutation', thread_messages_create_multiple: { __typename?: 'FThreadMessagesCreateResult', count: number } }; +export type MessageCreateMultipleMutation = { __typename?: 'Mutation', thread_messages_create_multiple: number }; export type ThreadPatchMutationVariables = Exact<{ id: Scalars['String']['input']; @@ -1368,7 +1491,7 @@ export type ModelsForExpertQueryVariables = Exact<{ }>; -export type ModelsForExpertQuery = { __typename?: 'Query', expert_choice_consequences: Array<{ __typename?: 'FModelItem', provm_name: string }> }; +export type ModelsForExpertQuery = { __typename?: 'Query', expert_choice_consequences: { __typename?: 
'FExpertChoiceConsequences', models: Array<{ __typename?: 'FModelItem', provm_name: string }> } }; export type ToolsForGroupQueryVariables = Exact<{ located_fgroup_id: Scalars['String']['input']; @@ -1410,10 +1533,10 @@ export const ThreadsPageSubsDocument = {"kind":"Document","definitions":[{"kind" export const DeleteThreadDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"DeleteThread"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_delete"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}}]}]}}]} as unknown as DocumentNode; export const CreateThreadDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"CreateThread"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"FThreadInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_create"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ft_id"}}]}}]}}]} as unknown as DocumentNode; export const MessagesSubscriptionDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"subscription","name":{"kind":"Name","value":"MessagesSubscription"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"want_deltas"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"Boolean"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"comprehensive_thread_subs"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"ft_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"want_deltas"},"value":{"kind":"Variable","name":{"kind":"Name","value":"want_deltas"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"news_action"}},{"kind":"Field","name":{"kind":"Name","value":"news_payload_id"}},{"kind":"Field","name":{"kind":"Name","value":"news_payload_thread_message"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ft_app_specific"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_belongs_to_ft_id"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_alt"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_num"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_prev_alt"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_role"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_content"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_tool_calls"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_call_id"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_usage"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_created_ts"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_user_preferences"}}]}},{"kind":"Field","name":{"kind":"Name","value":"stream_delta"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ftm_role"}},{"kind":"Field","name":{"kind":"Name","value":"ftm_content"}}]}},{"kind":"Field","name":{"kind":"Name","value":"news_payload_thread"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"located_fgroup_id"}},{"kind":"Field","name":{"kind":"Name","value":"ft_id"}},{"kind":"Field","name":{"kind":"Name","value":"ft_need_user"}},{"kind":"Field","name":{"kind":"Name","value":"ft_need_assistant"}},{"kind":"Field","name":{"kind":"Name","value":"ft_fexp_id"}},{"kind":"Field","name":{"kind":"Name","value":"ft_confirmation_request"}},{"kind":"Field","name":{"kind":"Name","value":"ft_confirmation_response"}},{"kind":"Field","name":{"kind":"Name","value":"ft_title"}}]}}]}}]}}]} as unknown as DocumentNode; -export const MessageCreateMultipleDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"MessageCreateMultiple"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"FThreadMultipleMessagesInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_messages_create_multiple"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"count"}}]}}]}}]} as unknown as DocumentNode; +export const MessageCreateMultipleDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"MessageCreateMultiple"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"input"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"FThreadMultipleMessagesInput"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_messages_create_multiple"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"input"},"value":{"kind":"Variable","name":{"kind":"Name","value":"input"}}}]}]}}]} as unknown as DocumentNode; export const ThreadPatchDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"ThreadPatch"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"message"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_patch"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"id"}}},{"kind":"Argument","name":{"kind":"Name","value":"patch"},"value":{"kind":"ObjectValue","fields":[{"kind":"ObjectField","name":{"kind":"Name","value":"ft_error"},"value":{"kind":"Variable","name":{"kind":"Name","value":"message"}}}]}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ft_id"}}]}}]}}]} as unknown as DocumentNode; export const ExpertsForGroupDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ExpertsForGroup"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"experts_effective_list"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"located_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"fexp_id"}},{"kind":"Field","name":{"kind":"Name","value":"fexp_name"}}]}}]}}]} as unknown as DocumentNode; -export const ModelsForExpertDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ModelsForExpert"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"expert_choice_consequences"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"fexp_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"inside_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"provm_name"}}]}}]}}]} as unknown as DocumentNode; +export const ModelsForExpertDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ModelsForExpert"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"expert_choice_consequences"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"fexp_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"fexp_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"inside_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"inside_fgroup_id"}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"models"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"provm_name"}}]}}]}}]}}]} as unknown as DocumentNode; export const ToolsForGroupDocument = 
{"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"ToolsForGroup"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}},"type":{"kind":"NonNullType","type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}}}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"cloud_tools_list"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"located_fgroup_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"located_fgroup_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"include_offline"},"value":{"kind":"BooleanValue","value":false}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ctool_confirmed_exists_ts"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_description"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_id"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_name"}},{"kind":"Field","name":{"kind":"Name","value":"ctool_parameters"}},{"kind":"Field","name":{"kind":"Name","value":"located_fgroup_id"}},{"kind":"Field","name":{"kind":"Name","value":"owner_fuser_id"}}]}}]}}]} as unknown as DocumentNode; export const ThreadConfirmationResponseDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"mutation","name":{"kind":"Name","value":"ThreadConfirmationResponse"},"variableDefinitions":[{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"confirmation_response"}},"type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}},"defaultValue":{"kind":"StringValue","value":"","block":false}},{"kind":"VariableDefinition","variable":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}},"type":{"kind":"NamedType","name":{"kind":"Name","value":"String"}},"defaultValue":{"kind":"StringValue","value":"","block":false}}],"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"thread_set_confirmation_response"},"arguments":[{"kind":"Argument","name":{"kind":"Name","value":"ft_id"},"value":{"kind":"Variable","name":{"kind":"Name","value":"ft_id"}}},{"kind":"Argument","name":{"kind":"Name","value":"confirmation_response"},"value":{"kind":"Variable","name":{"kind":"Name","value":"confirmation_response"}}}]}]}}]} as unknown as DocumentNode; export const BasicStuffDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"BasicStuff"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"query_basic_stuff"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"fuser_id"}},{"kind":"Field","name":{"kind":"Name","value":"my_own_ws_id"}},{"kind":"Field","name":{"kind":"Name","value":"workspaces"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"ws_id"}},{"kind":"Field","name":{"kind":"Name","value":"ws_owner_fuser_id"}},{"kind":"Field","name":{"kind":"Name","value":"ws_root_group_id"}},{"kind":"Field","name":{"kind":"Name","value":"root_group_name"}},{"kind":"Field","name":{"kind":"Name","value":"have_coins_exactly"}},{"kind":"Field","name":{"kind":"Name","value":"have_coins_enough"}},{"kind":"Field","name":{"kind":"Name","value":"have_admin"}}]}}]}}]}}]} as unknown as DocumentNode; diff --git 
a/refact-agent/gui/generated/schema.graphql b/refact-agent/gui/generated/schema.graphql index ab0316236..95ce5e3b4 100644 --- a/refact-agent/gui/generated/schema.graphql +++ b/refact-agent/gui/generated/schema.graphql @@ -15,10 +15,8 @@ type BasicStuffResult { input CloudtoolResultInput { dollars: Float! = 0 - ft_id: String! - ftm_alt: Int! + fcall_id: String! ftm_content: String! - ftm_num: Int! ftm_provenance: String! } @@ -90,6 +88,7 @@ type FExpertChoiceConsequences { input FExpertInput { fexp_allow_tools: String! + fexp_app_capture_tools: String! = "null" fexp_block_tools: String! fexp_name: String! fexp_python_kernel: String! @@ -101,6 +100,7 @@ input FExpertInput { type FExpertOutput { fexp_allow_tools: String! + fexp_app_capture_tools: JSON fexp_block_tools: String! fexp_id: String! fexp_name: String! @@ -157,6 +157,7 @@ type FExternalDataSourceSubs { } input FKanbanTaskInput { + details_json: String = null state: String! title: String! } @@ -204,17 +205,78 @@ type FKnowledgeItemSubs { input FMarketplaceExpertInput { fexp_allow_tools: String! + fexp_app_capture_tools: String! = "" fexp_block_tools: String! fexp_name: String! fexp_python_kernel: String! fexp_system_prompt: String! } +type FMarketplaceInstallOutput { + persona_id: String! +} + +type FMarketplaceOutput { + available_ws_id: String + marketable_description: String! + marketable_name: String! + marketable_picture_big: String + marketable_picture_small: String + marketable_popularity_counter: Int! + marketable_price: Int! + marketable_star_event: Int! + marketable_star_sum: Int! + marketable_title1: String! + marketable_title2: String! + marketable_version: String! + seller_fuser_id: String +} + type FMassInvitationOutput { fuser_id: String! result: String! } +input FMcpServerInput { + located_fgroup_id: String! + mcp_command: String! + mcp_description: String! = "" + mcp_enabled: Boolean! = false + mcp_env_vars: JSON = null + mcp_name: String! +} + +type FMcpServerOutput { + located_fgroup_id: String! + mcp_command: String! + mcp_created_ts: Float! + mcp_description: String! + mcp_enabled: Boolean! + mcp_env_vars: JSON + mcp_id: String! + mcp_modified_ts: Float! + mcp_name: String! + owner_fuser_id: String! + owner_shared: Boolean! +} + +input FMcpServerPatch { + located_fgroup_id: String = null + mcp_command: String = null + mcp_description: String = null + mcp_enabled: Boolean = null + mcp_env_vars: JSON = null + mcp_name: String = null + owner_shared: Boolean = null +} + +type FMcpServerSubs { + news_action: String! + news_payload: FMcpServerOutput + news_payload_id: String! + news_pubsub: String! +} + type FModelItem { provm_name: String! } @@ -236,6 +298,11 @@ type FPermissionSubs { news_pubsub: String! } +type FPersonaHistoryItemOutput { + ft_id: String! + title: String! +} + input FPersonaInput { located_fgroup_id: String! persona_discounts: String = null @@ -246,8 +313,8 @@ input FPersonaInput { } type FPersonaKanbanSubs { + bucket: String! news_action: String! - news_bucket: String! news_payload_id: String! news_payload_task: FPersonaKanbanTaskOutput } @@ -269,7 +336,12 @@ type FPersonaKanbanTaskOutput { } type FPersonaOutput { + history: [FPersonaHistoryItemOutput!] + latest_ft_id: String located_fgroup_id: String! + marketable_docker_image: String + marketable_run_this: String + marketable_setup_default: JSON owner_fuser_id: String! persona_archived_ts: Float! persona_created_ts: Float! @@ -279,6 +351,8 @@ type FPersonaOutput { persona_marketable_name: String! persona_marketable_version: String! 
persona_name: String! + persona_picture_big: String + persona_picture_small: String persona_setup: JSON! } @@ -298,12 +372,6 @@ type FPersonaSubs { news_pubsub: String! } -type FPluginOutput { - plugin_name: String! - plugin_setup_page: String! - plugin_version: String! -} - input FStatsAddInput { fgroup_id: String! = "" st_chart: Int! @@ -386,11 +454,6 @@ type FThreadMessageSubs { stream_delta: FThreadDelta } -type FThreadMessagesCreateResult { - count: Int! - messages: [FThreadMessageOutput!]! -} - input FThreadMultipleMessagesInput { ftm_belongs_to_ft_id: String! messages: [FThreadMessageInput!]! @@ -508,6 +571,7 @@ type Mutation { bot_activate(activation_type: String!, first_calls: String!, first_question: String!, localtools: String!, persona_id: String!, title: String!, who_is_asking: String!): FThreadOutput! bot_arrange_kanban_situation(persona_id: String!, tasks: [FKanbanTaskInput!]!, ws_id: String!): Boolean! bot_install_from_marketplace(inside_fgroup_id: String!, new_setup: String!, persona_id: String!, persona_marketable_name: String!, persona_marketable_version: String!, persona_name: String!): Boolean! + bot_kanban_post_into_inbox(budget: Int!, details_json: String!, persona_id: String!, title: String!): Boolean! cloudtool_post_result(input: CloudtoolResultInput!): Boolean! create_captured_thread(input: FThreadInput!, on_behalf_of_fuser_id: String = null): FThreadOutput! email_confirm(token: String!): EmailConfirmResult! @@ -529,7 +593,12 @@ type Mutation { knowledge_item_mass_group_patch(dst_group_id: String!, src_group_id: String!): Int! knowledge_item_patch(id: String!, patch: FKnowledgeItemPatch!): FKnowledgeItemOutput! make_sure_have_expert(fexp_name: String!, fgroup_id: String, owner_fuser_id: String, python_kernel: String!, system_prompt: String!): String! - marketplace_upsert_dev_bot(marketable_description: String!, marketable_expert_scheduled: FMarketplaceExpertInput!, marketable_expert_setup: FMarketplaceExpertInput!, marketable_expert_subchat: FMarketplaceExpertInput!, marketable_expert_todo: FMarketplaceExpertInput!, marketable_github_repo: String!, marketable_name: String!, marketable_run_this: String!, marketable_setup_default: String!, marketable_version: String!, ws_id: String!): FBotInstallOutput! + marketplace_install(fgroup_id: String!, marketable_name: String!): FMarketplaceInstallOutput! + marketplace_upgrade(fgroup_id: String!, marketable_name: String!, specific_version: String!): Boolean! + marketplace_upsert_dev_bot(marketable_description: String!, marketable_expert_default: FMarketplaceExpertInput!, marketable_expert_setup: FMarketplaceExpertInput, marketable_expert_subchat: FMarketplaceExpertInput, marketable_expert_todo: FMarketplaceExpertInput, marketable_github_repo: String!, marketable_name: String!, marketable_picture_big_b64: String = null, marketable_picture_small_b64: String = null, marketable_run_this: String!, marketable_setup_default: String!, marketable_title1: String!, marketable_title2: String!, marketable_version: String!, ws_id: String!): FBotInstallOutput! + mcp_server_create(input: FMcpServerInput!): FMcpServerOutput! + mcp_server_delete(id: String!): Boolean! + mcp_server_patch(id: String!, patch: FMcpServerPatch!): FMcpServerOutput! password_change(new_password: String!, old_password: String!): Boolean! permission_delete(fgroup_id: String!, fuser_id: String!): Boolean! permission_patch(fgroup_id: String!, fuser_id: String!, patch: FPermissionPatch!): FPermissionOutput! 
@@ -541,16 +610,14 @@ type Mutation { session_open(password: String!, username: String!): String! session_renew: String! stats_add(records: [FStatsAddInput!]!): Boolean! - tech_support_activate(ws_id: String!): Boolean! - tech_support_set_config(config: TechSupportSettingsInput!, ws_id: String!): Boolean! + thread_app_capture_patch(ft_app_searchable: String = null, ft_app_specific: String = null, ft_id: String!): Boolean! thread_clear_confirmation(ft_id: String!): Boolean! thread_create(input: FThreadInput!): FThreadOutput! thread_delete(id: String!): Boolean! thread_lock(ft_id: String!, worker_name: String!): Boolean! thread_mass_group_patch(dst_group_id: String!, src_group_id: String!): Int! - thread_messages_create_multiple(input: FThreadMultipleMessagesInput!): FThreadMessagesCreateResult! + thread_messages_create_multiple(delete_negative: [Int!] = null, input: FThreadMultipleMessagesInput!): Int! thread_patch(id: String!, patch: FThreadPatch!): FThreadOutput! - thread_provide_toolset(ft_id: String!, toolset: String!): Boolean! thread_reset_error(ft_error: String!, ft_id: String!): Boolean! thread_reset_title(ft_id: String!, ft_title: String!): Boolean! thread_set_confirmation_request(confirmation_request: String!, ft_id: String!): Boolean! @@ -572,8 +639,7 @@ type Query { api_key_list: [FApiKeyOutput!]! audit_list(limit: Int!, skip: Int!, ws_id: String!): [FAuditRecordOutput!]! cloud_tools_list(include_offline: Boolean! = false, located_fgroup_id: String!): [FCloudTool!]! - expert_choice_consequences(fexp_id: String!, inside_fgroup_id: String!): [FModelItem!]! - expert_choice_consequences2(fexp_id: String!, inside_fgroup_id: String!): FExpertChoiceConsequences! + expert_choice_consequences(fexp_id: String!, inside_fgroup_id: String!): FExpertChoiceConsequences! expert_get(id: String!): FExpertOutput! expert_list(limit: Int!, located_fgroup_id: String!, skip: Int!, sort_by: String! = ""): [FExpertOutput!]! experts_effective_list(located_fgroup_id: String!): [FExpertOutput!]! @@ -586,15 +652,19 @@ type Query { knowledge_item_get(id: String!): FKnowledgeItemOutput! knowledge_item_list(limit: Int!, located_fgroup_id: String!, skip: Int!, sort_by: String! = ""): [FKnowledgeItemOutput!]! knowledge_vecdb_search(fgroup_id: String!, q: String!, top_n: Int! = 5): [FKnowledgeItemOutput!]! + marketplace_details(fgroup_id: String!, marketable_name: String!): [FMarketplaceOutput!]! + marketplace_list(fgroup_id: String!, take: Int! = 20): [FMarketplaceOutput!]! + marketplace_search(fgroup_id: String!, query: String!, take: Int! = 20): [FMarketplaceOutput!]! + mcp_server_get(id: String!): FMcpServerOutput! + mcp_server_list(limit: Int!, located_fgroup_id: String!, skip: Int!, sort_by: String! = ""): [FMcpServerOutput!]! permission_list(fgroup_id: String!): [FPermissionOutput!]! persona_get(id: String!): FPersonaOutput! persona_list(limit: Int!, located_fgroup_id: String!, skip: Int!, sort_by: String! = ""): [FPersonaOutput!]! - plugins_installed: [FPluginOutput!]! + persona_opened_in_ui(persona_id: String!): FPersonaOutput! query_basic_stuff(want_invitations: Boolean! = false): BasicStuffResult! reset_password_token_info(token: String!): PasswordResetTokenInfo! stats_query(breakdown_fexp_name: [String!]!, breakdown_fuser_id: [String!]!, breakdown_model: [String!]!, fgroup_id: String! = "", filter_fexp_id: [String!]! = [], filter_fuser_id: [String!]! = [], filter_model: [String!]! = [], filter_thing: [String!]! 
= [], st_chart: Int!, st_span: String!, timekey_from: String!, timekey_to: String!, ws_id: String! = ""): [FStatsOutput!]! stats_query_distinct(fgroup_id: String!, filter_fexp_id: [String!]!, filter_fuser_id: [String!]!, filter_model: [String!]!, st_chart: Int!, st_span: String!, timekey_from: String!, timekey_to: String!, ws_id: String!): StatsDistinctOutput! - tech_support_get_config(ws_id: String!): TechSupportSettingsOutput thread_get(id: String!): FThreadOutput! thread_list(limit: Int!, located_fgroup_id: String!, skip: Int!, sort_by: String! = ""): [FThreadOutput!]! thread_messages_list(ft_id: String!, ftm_alt: Int = null): [FThreadMessageOutput!]! @@ -624,6 +694,7 @@ type Subscription { experts_in_group(filter: [String!]! = [], limit: Int! = 0, located_fgroup_id: String!, sort_by: [String!]! = []): FExpertSubs! external_data_sources_in_group(filter: [String!]! = [], limit: Int! = 0, located_fgroup_id: String!, sort_by: [String!]! = []): FExternalDataSourceSubs! knowledge_items_in_group(filter: [String!]! = [], limit: Int! = 0, located_fgroup_id: String!, sort_by: [String!]! = []): FKnowledgeItemSubs! + mcp_servers_in_group(filter: [String!]! = [], limit: Int! = 0, located_fgroup_id: String!, sort_by: [String!]! = []): FMcpServerSubs! permissions_in_group_subs(fgroup_id: String!, limit: Int!, quicksearch: String!): FPermissionSubs! persona_kanban_subs(limit_done: Int! = 30, limit_garbage: Int! = 30, limit_inbox: Int! = 30, persona_id: String!): FPersonaKanbanSubs! personas_in_group(filter: [String!]! = [], limit: Int! = 0, located_fgroup_id: String!, sort_by: [String!]! = []): FPersonaSubs! @@ -631,22 +702,6 @@ type Subscription { tree_subscription(ws_id: String!): TreeUpdateSubs! } -input TechSupportSettingsInput { - support_api_key: String! - support_channel_list: [String!]! - support_discord_key: String! - support_fgroup_id: String! - support_fuser_id: String! -} - -type TechSupportSettingsOutput { - support_api_key: String! - support_channel_list: [String!]! - support_discord_key: String! - support_fgroup_id: String! - support_fuser_id: String! -} - type TreeUpdateSubs { treeupd_action: String! treeupd_id: String! 
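A minimal sketch of how a client could exercise the new MCP server API introduced in the schema hunk above (operation names here are illustrative; the fields, arguments, and defaults are taken directly from FMcpServerInput, FMcpServerOutput, mcp_server_create and mcp_server_list as added in this diff):

mutation CreateMcpServer($input: FMcpServerInput!) {
  mcp_server_create(input: $input) {
    mcp_id
    mcp_name
    mcp_command
    mcp_enabled
    located_fgroup_id
  }
}

query McpServersForGroup($located_fgroup_id: String!) {
  mcp_server_list(located_fgroup_id: $located_fgroup_id, limit: 20, skip: 0) {
    mcp_id
    mcp_name
    mcp_command
    mcp_enabled
    owner_shared
  }
}

Per the input type, only located_fgroup_id, mcp_command and mcp_name are required; mcp_description defaults to "", mcp_enabled to false, and mcp_env_vars to null, and sort_by on mcp_server_list defaults to "" so it can be omitted as above.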
diff --git a/refact-agent/gui/package.json b/refact-agent/gui/package.json index 1995e1708..3e0660855 100644 --- a/refact-agent/gui/package.json +++ b/refact-agent/gui/package.json @@ -175,5 +175,8 @@ "@rollup/rollup-win32-arm64-msvc": "^4.44.1", "@rollup/rollup-win32-ia32-msvc": "^4.44.1", "@rollup/rollup-win32-x64-msvc": "^4.44.1" + }, + "overrides": { + "stylus": "github:stylus/stylus#0.59.0" } } diff --git a/refact-agent/gui/src/__fixtures__/chat.ts b/refact-agent/gui/src/__fixtures__/chat.ts index 37a66a07d..912766730 100644 --- a/refact-agent/gui/src/__fixtures__/chat.ts +++ b/refact-agent/gui/src/__fixtures__/chat.ts @@ -1,5 +1,6 @@ import type { RootState } from "../app/store"; -import { FTMMessage } from "../features/ThreadMessages/makeMessageTrie"; +import type { BaseMessage } from "../services/refact/types"; +// import { FTMMessage } from "../features/ThreadMessages/makeMessageTrie"; export * from "./some_chrome_screenshots"; @@ -951,7 +952,7 @@ export const TOOL_IMAGE_STUB: ChatMessages = [ }; }); -export const CHAT_WITH_KNOWLEDGE_TOOL: FTMMessage[] = [ +export const CHAT_WITH_KNOWLEDGE_TOOL: BaseMessage[] = [ { ftm_role: "system", ftm_content: diff --git a/refact-agent/gui/src/__fixtures__/chat_config_thread.ts b/refact-agent/gui/src/__fixtures__/chat_config_thread.ts index cdc900e4c..a3c4200a5 100644 --- a/refact-agent/gui/src/__fixtures__/chat_config_thread.ts +++ b/refact-agent/gui/src/__fixtures__/chat_config_thread.ts @@ -1,437 +1,416 @@ -import type { Chat } from "../features/Chat/Thread"; +import type { BaseMessage } from "../services/refact/types"; -export const CHAT_CONFIG_THREAD: Chat = { - streaming: false, - thread: { - mode: "CONFIGURE", - id: "941fb8f4-409c-4430-a3b2-6450fafdb9f4", - messages: [ - { - ftm_role: "user", - ftm_content: - "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. 
If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", - function: { - arguments: - '{"query":"\\nSELECT table_name\\nFROM information_schema.tables\\nWHERE table_schema=\'public\';"}', - name: "postgres", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", - ftm_content: - "🧩 for configuration go to SETTINGS:postgres, psql failed:\nNo such file or directory (os error 2)", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_kw6TJChemYjXEGL9mLL3T0mN", - function: { - arguments: '{"path":"/Users/marc/Projects/refact-lsp"}', - name: "tree", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_kw6TJChemYjXEGL9mLL3T0mN", - ftm_content: - "/\n Users/\n marc/\n Projects/\n refact-lsp/\n .dockerignore\n .gitattributes\n .gitignore\n CODE_OF_CONDUCT.md\n CONTRIBUTING.md\n Cargo.lock\n Cargo.toml\n Cross.toml\n INTEGRATIONS.md\n LICENSE\n README.md\n build.rs\n tests/\n __init__.py\n lsp_connect.py\n test01_completion_edge_cases.py\n test02_completion_with_rag.py\n test03_at_commands_completion.py\n test04_completion_lsp.py\n test05_is_openai_compatible.py\n test06_tool_not_tool.py\n test07_memories.py\n test08_post_processing.py\n test09_ast_pick_up_changes.py\n test10_locate.py\n test11_patch.py\n test11_patch_partial_edit.py\n test12_tools_authorize_calls.py\n test13_vision.py\n test_diff_handlers.py\n test13_data/\n 200.jpg\n 530.jpg\n test11_data/\n already_applied_rewrite_symbol_01.py\n already_applied_rewrite_symbol_02.py\n toad_orig.py\n toad_partial_edit_01.py\n toad_partial_edit_02.py\n toad_rewrite_symbol_01.py\n toad_rewrite_symbol_02.py\n toad_rewrite_symbol_03.py\n toad_rewrite_symbol_04_orig.rs\n toad_rewrite_symbol_04_patched.rs\n emergency_frog_situation/\n frog.py\n holiday.py\n jump_to_conclusions.py\n set_as_avatar.py\n work_day.py\n src/\n background_tasks.rs\n cached_tokenizers.rs\n call_validation.rs\n caps.rs\n completion_cache.rs\n custom_error.rs\n diffs.rs\n fetch_embedding.rs\n file_filter.rs\n files_correction.rs\n files_in_jsonl.rs\n files_in_workspace.rs\n forward_to_hf_endpoint.rs\n forward_to_openai_endpoint.rs\n fuzzy_search.rs\n git.rs\n global_context.rs\n http.rs\n knowledge.rs\n known_models.rs\n lsp.rs\n main.rs\n nicer_logs.rs\n privacy.rs\n privacy_compiled_in.rs\n restream.rs\n scratchpad_abstract.rs\n subchat.rs\n version.rs\n yaml_configs/\n create_configs.rs\n customization_compiled_in.rs\n customization_loader.rs\n mod.rs\n vecdb/\n mod.rs\n vdb_cache.rs\n vdb_file_splitter.rs\n vdb_highlev.rs\n vdb_lance.rs\n vdb_remote.rs\n vdb_structs.rs\n vdb_thread.rs\n tools/\n mod.rs\n tool_ast_definition.rs\n tool_ast_reference.rs\n tool_cat.rs\n tool_cmdline.rs\n tool_deep_thinking.rs\n tool_knowledge.rs\n tool_locate_search.rs\n tool_patch.rs\n tool_relevant_files.rs\n tool_search.rs\n tool_tree.rs\n tool_web.rs\n tools_description.rs\n tools_execute.rs\n tool_patch_aux/\n ast_lint.rs\n diff_apply.rs\n diff_structs.rs\n fs_utils.rs\n mod.rs\n no_model_edit.rs\n postprocessing_utils.rs\n tickets_parsing.rs\n model_based_edit/\n blocks_of_code_parser.rs\n mod.rs\n model_execution.rs\n partial_edit.rs\n whole_file_parser.rs\n telemetry/\n basic_comp_counters.rs\n basic_network.rs\n basic_robot_human.rs\n basic_transmit.rs\n 
mod.rs\n snippets_collection.rs\n snippets_transmit.rs\n telemetry_structs.rs\n utils.rs\n scratchpads/\n chat_generic.rs\n chat_llama2.rs\n chat_passthrough.rs\n chat_utils_deltadelta.rs\n chat_utils_limit_history.rs\n chat_utils_prompts.rs\n code_completion_fim.rs\n code_completion_replace.rs\n comments_parser.rs\n mod.rs\n multimodality.rs\n passthrough_convert_messages.rs\n scratchpad_utils.rs\n postprocessing/\n mod.rs\n pp_command_output.rs\n pp_context_files.rs\n pp_plain_text.rs\n pp_utils.rs\n integrations/\n config_chat.rs\n integr_abstract.rs\n integr_chrome.rs\n integr_github.rs\n integr_gitlab.rs\n integr_pdb.rs\n integr_postgres.rs\n mod.rs\n process_io_utils.rs\n running_integrations.rs\n sessions.rs\n setting_up_integrations.rs\n yaml_schema.rs\n docker/\n docker_container_manager.rs\n docker_ssh_tunnel_utils.rs\n integr_docker.rs\n mod.rs\n http/\n routers.rs\n utils.rs\n routers/\n info.rs\n v1.rs\n v1/\n ast.rs\n at_commands.rs\n at_tools.rs\n caps.rs\n chat.rs\n code_completion.rs\n code_lens.rs\n customization.rs\n dashboard.rs\n docker.rs\n git.rs\n graceful_shutdown.rs\n gui_help_handlers.rs\n handlers_memdb.rs\n links.rs\n lsp_like_handlers.rs\n patch.rs\n snippet_accepted.rs\n status.rs\n subchat.rs\n sync_files.rs\n system_prompt.rs\n telemetry_network.rs\n v1_integrations.rs\n vecdb.rs\n dashboard/\n dashboard.rs\n mod.rs\n structs.rs\n utils.rs\n at_commands/\n at_ast_definition.rs\n at_ast_reference.rs\n at_commands.rs\n at_file.rs\n at_search.rs\n at_tree.rs\n at_web.rs\n execute_at.rs\n mod.rs\n ast/\n ast_db.rs\n ast_indexer_thread.rs\n ast_parse_anything.rs\n ast_structs.rs\n chunk_utils.rs\n dummy_tokenizer.json\n file_splitter.rs\n linters.rs\n mod.rs\n parse_common.rs\n parse_python.rs\n treesitter/\n ast_instance_structs.rs\n file_ast_markup.rs\n language_id.rs\n mod.rs\n parsers.rs\n skeletonizer.rs\n structs.rs\n parsers/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n tests.rs\n ts.rs\n utils.rs\n tests/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n ts.rs\n cases/\n ts/\n main.ts\n main.ts.json\n person.ts\n person.ts.decl_json\n person.ts.skeleton\n rust/\n main.rs\n main.rs.json\n point.rs\n point.rs.decl_json\n point.rs.skeleton\n python/\n calculator.py\n calculator.py.decl_json\n calculator.py.skeleton\n main.py\n main.py.json\n js/\n car.js\n car.js.decl_json\n car.js.skeleton\n main.js\n main.js.json\n java/\n main.java\n main.java.json\n person.java\n person.java.decl_json\n person.java.skeleton\n cpp/\n circle.cpp\n circle.cpp.decl_json\n circle.cpp.skeleton\n main.cpp\n main.cpp.json\n alt_testsuite/\n cpp_goat_library.correct\n cpp_goat_library.h\n cpp_goat_main.correct\n cpp_goat_main.cpp\n jump_to_conclusions_annotated.py\n py_goat_library.correct\n py_goat_library.py\n py_goat_library_annotated.py\n py_goat_main.py\n py_goat_main_annotated.py\n py_torture1_attr.py\n py_torture1_attr_annotated.py\n py_torture2_resolving.py\n py_torture2_resolving_annotated.py\n python_binding_and_cmdline/\n setup.py\n refact/\n __init__.py\n chat_client.py\n cli_app_switcher.py\n cli_export.py\n cli_inspect.py\n cli_main.py\n cli_markdown.py\n cli_printing.py\n cli_settings.py\n cli_statusbar.py\n cli_streaming.py\n lsp_runner.py\n traj_compressor.py\n examples/\n ast_definition.sh\n ast_references.sh\n chat_with_at_command.py\n http_caps.sh\n http_chat.sh\n http_chat_passthrough.sh\n http_completion.sh\n http_rag_status.sh\n http_subchat.sh\n http_vecdb_search.sh\n lsp_runner.py\n note3.py\n rag_skeletonize_video.py\n docker/\n 
lsp-debug.Dockerfile\n chrome/\n mac_arm-130.0.6723.69/\n chrome-mac-arm64/\n Google Chrome for Testing.app/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest\n en.lproj/\n Localizable.strings\n Frameworks/\n Google Chrome for Testing Framework.framework/\n Versions/\n 130.0.6723.69/\n Libraries/\n WidevineCdm/\n _platform_specific/\n mac_arm64/\n libwidevinecdm.dylib\n bring_your_own_key/\n hf.yaml\n mixed.yaml\n openai.yaml\n openrouter.yaml", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_4we2wH5H50A2m6CIJqKbzYH8", - function: { - arguments: '{"paths": "Cargo.toml", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 0, - }, - { - id: "call_vmIGl31ytfpLWPkc138HJnxz", - function: { - arguments: '{"paths": "README.md", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 1, - }, - { - id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", - function: { - arguments: - '{"paths": "tests/emergency_frog_situation/frog.py", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 2, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", - ftm_content: - "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", - }, - { - ftm_role: "tool", - ftm_call_id: "call_vmIGl31ytfpLWPkc138HJnxz", - ftm_content: - 'Path problems:\n\nThe path "README.md" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "tool", - ftm_call_id: "call_4we2wH5H50A2m6CIJqKbzYH8", - ftm_content: - 'Path problems:\n\nThe path "Cargo.toml" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "frog.py", - file_content: - 'import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n def croak(self, n_times):\n for n in range(n_times):\n print("croak")\n\n\nclass AlternativeFrog:\n def alternative_jump(self):\n pass\n\n\ndef standalone_jumping_function():\n print("I\'m a frog! Jump! Jump! Really!")\n', - line1: 1, - line2: 39, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_sNnvyOLlmulfHIikJzKhmz55", - function: { - arguments: '{"paths": "src/Cargo.toml", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 0, - }, - { - id: "call_n9FACVDl6a4tllgyWaLCl8s9", - function: { - arguments: '{"paths": "src/main.rs", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 1, - }, - { - id: "call_1DUkSfLq6Io8xowqVjJEuybM", - function: { - arguments: '{"paths": "src/lib.rs", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 2, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_1DUkSfLq6Io8xowqVjJEuybM", - ftm_content: - 'Path problems:\n\nThe path "src/lib.rs" does not exist. 
There are no similar names either.\n', - }, - { - ftm_role: "tool", - ftm_call_id: "call_n9FACVDl6a4tllgyWaLCl8s9", - ftm_content: - 'Path problems:\n\nThe path "src/main.rs" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "tool", - ftm_call_id: "call_sNnvyOLlmulfHIikJzKhmz55", - ftm_content: - 'Path problems:\n\nThe path "src/Cargo.toml" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_h8biZ8JNUjeWGbL9CCatbPcf", - function: { - arguments: '{"paths":"src/background_tasks.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_h8biZ8JNUjeWGbL9CCatbPcf", - ftm_content: - 'Path problems:\n\nThe path "src/background_tasks.rs" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", - function: { - arguments: - '{"paths":"src/tools/integr_postgres.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", - ftm_content: - 'Path problems:\n\nThe path "src/tools/integr_postgres.rs" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_RXDRwr3yCJEplHud4xRoxWlC", - function: { - arguments: - '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_RXDRwr3yCJEplHud4xRoxWlC", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_OvYAforNKAFz0d7mv96AnTVR", - function: { - arguments: - '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], +export const CHAT_CONFIG_THREAD: BaseMessage[] = [ + { + ftm_role: "user", + ftm_content: + "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", + function: { + arguments: + '{"query":"\\nSELECT table_name\\nFROM information_schema.tables\\nWHERE table_schema=\'public\';"}', + name: "postgres", + }, + type: "function", + index: 0, }, - { - ftm_role: "tool", - ftm_call_id: "call_OvYAforNKAFz0d7mv96AnTVR", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. 
There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", + ftm_content: + "🧩 for configuration go to SETTINGS:postgres, psql failed:\nNo such file or directory (os error 2)", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_kw6TJChemYjXEGL9mLL3T0mN", + function: { + arguments: '{"path":"/Users/marc/Projects/refact-lsp"}', + name: "tree", + }, + type: "function", + index: 0, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_3wuMILBYl0z7pBMUoJOwohQf", - function: { - arguments: - '{"paths":"src/integrations/integr_github.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_kw6TJChemYjXEGL9mLL3T0mN", + ftm_content: + "/\n Users/\n marc/\n Projects/\n refact-lsp/\n .dockerignore\n .gitattributes\n .gitignore\n CODE_OF_CONDUCT.md\n CONTRIBUTING.md\n Cargo.lock\n Cargo.toml\n Cross.toml\n INTEGRATIONS.md\n LICENSE\n README.md\n build.rs\n tests/\n __init__.py\n lsp_connect.py\n test01_completion_edge_cases.py\n test02_completion_with_rag.py\n test03_at_commands_completion.py\n test04_completion_lsp.py\n test05_is_openai_compatible.py\n test06_tool_not_tool.py\n test07_memories.py\n test08_post_processing.py\n test09_ast_pick_up_changes.py\n test10_locate.py\n test11_patch.py\n test11_patch_partial_edit.py\n test12_tools_authorize_calls.py\n test13_vision.py\n test_diff_handlers.py\n test13_data/\n 200.jpg\n 530.jpg\n test11_data/\n already_applied_rewrite_symbol_01.py\n already_applied_rewrite_symbol_02.py\n toad_orig.py\n toad_partial_edit_01.py\n toad_partial_edit_02.py\n toad_rewrite_symbol_01.py\n toad_rewrite_symbol_02.py\n toad_rewrite_symbol_03.py\n toad_rewrite_symbol_04_orig.rs\n toad_rewrite_symbol_04_patched.rs\n emergency_frog_situation/\n frog.py\n holiday.py\n jump_to_conclusions.py\n set_as_avatar.py\n work_day.py\n src/\n background_tasks.rs\n cached_tokenizers.rs\n call_validation.rs\n caps.rs\n completion_cache.rs\n custom_error.rs\n diffs.rs\n fetch_embedding.rs\n file_filter.rs\n files_correction.rs\n files_in_jsonl.rs\n files_in_workspace.rs\n forward_to_hf_endpoint.rs\n forward_to_openai_endpoint.rs\n fuzzy_search.rs\n git.rs\n global_context.rs\n http.rs\n knowledge.rs\n known_models.rs\n lsp.rs\n main.rs\n nicer_logs.rs\n privacy.rs\n privacy_compiled_in.rs\n restream.rs\n scratchpad_abstract.rs\n subchat.rs\n version.rs\n yaml_configs/\n create_configs.rs\n customization_compiled_in.rs\n customization_loader.rs\n mod.rs\n vecdb/\n mod.rs\n vdb_cache.rs\n vdb_file_splitter.rs\n vdb_highlev.rs\n vdb_lance.rs\n vdb_remote.rs\n vdb_structs.rs\n vdb_thread.rs\n tools/\n mod.rs\n tool_ast_definition.rs\n tool_ast_reference.rs\n tool_cat.rs\n tool_cmdline.rs\n tool_deep_thinking.rs\n tool_knowledge.rs\n tool_locate_search.rs\n tool_patch.rs\n tool_relevant_files.rs\n tool_search.rs\n tool_tree.rs\n tool_web.rs\n tools_description.rs\n tools_execute.rs\n tool_patch_aux/\n ast_lint.rs\n diff_apply.rs\n diff_structs.rs\n fs_utils.rs\n mod.rs\n no_model_edit.rs\n postprocessing_utils.rs\n tickets_parsing.rs\n model_based_edit/\n blocks_of_code_parser.rs\n mod.rs\n model_execution.rs\n partial_edit.rs\n whole_file_parser.rs\n telemetry/\n basic_comp_counters.rs\n basic_network.rs\n basic_robot_human.rs\n basic_transmit.rs\n mod.rs\n snippets_collection.rs\n snippets_transmit.rs\n telemetry_structs.rs\n utils.rs\n scratchpads/\n chat_generic.rs\n chat_llama2.rs\n 
chat_passthrough.rs\n chat_utils_deltadelta.rs\n chat_utils_limit_history.rs\n chat_utils_prompts.rs\n code_completion_fim.rs\n code_completion_replace.rs\n comments_parser.rs\n mod.rs\n multimodality.rs\n passthrough_convert_messages.rs\n scratchpad_utils.rs\n postprocessing/\n mod.rs\n pp_command_output.rs\n pp_context_files.rs\n pp_plain_text.rs\n pp_utils.rs\n integrations/\n config_chat.rs\n integr_abstract.rs\n integr_chrome.rs\n integr_github.rs\n integr_gitlab.rs\n integr_pdb.rs\n integr_postgres.rs\n mod.rs\n process_io_utils.rs\n running_integrations.rs\n sessions.rs\n setting_up_integrations.rs\n yaml_schema.rs\n docker/\n docker_container_manager.rs\n docker_ssh_tunnel_utils.rs\n integr_docker.rs\n mod.rs\n http/\n routers.rs\n utils.rs\n routers/\n info.rs\n v1.rs\n v1/\n ast.rs\n at_commands.rs\n at_tools.rs\n caps.rs\n chat.rs\n code_completion.rs\n code_lens.rs\n customization.rs\n dashboard.rs\n docker.rs\n git.rs\n graceful_shutdown.rs\n gui_help_handlers.rs\n handlers_memdb.rs\n links.rs\n lsp_like_handlers.rs\n patch.rs\n snippet_accepted.rs\n status.rs\n subchat.rs\n sync_files.rs\n system_prompt.rs\n telemetry_network.rs\n v1_integrations.rs\n vecdb.rs\n dashboard/\n dashboard.rs\n mod.rs\n structs.rs\n utils.rs\n at_commands/\n at_ast_definition.rs\n at_ast_reference.rs\n at_commands.rs\n at_file.rs\n at_search.rs\n at_tree.rs\n at_web.rs\n execute_at.rs\n mod.rs\n ast/\n ast_db.rs\n ast_indexer_thread.rs\n ast_parse_anything.rs\n ast_structs.rs\n chunk_utils.rs\n dummy_tokenizer.json\n file_splitter.rs\n linters.rs\n mod.rs\n parse_common.rs\n parse_python.rs\n treesitter/\n ast_instance_structs.rs\n file_ast_markup.rs\n language_id.rs\n mod.rs\n parsers.rs\n skeletonizer.rs\n structs.rs\n parsers/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n tests.rs\n ts.rs\n utils.rs\n tests/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n ts.rs\n cases/\n ts/\n main.ts\n main.ts.json\n person.ts\n person.ts.decl_json\n person.ts.skeleton\n rust/\n main.rs\n main.rs.json\n point.rs\n point.rs.decl_json\n point.rs.skeleton\n python/\n calculator.py\n calculator.py.decl_json\n calculator.py.skeleton\n main.py\n main.py.json\n js/\n car.js\n car.js.decl_json\n car.js.skeleton\n main.js\n main.js.json\n java/\n main.java\n main.java.json\n person.java\n person.java.decl_json\n person.java.skeleton\n cpp/\n circle.cpp\n circle.cpp.decl_json\n circle.cpp.skeleton\n main.cpp\n main.cpp.json\n alt_testsuite/\n cpp_goat_library.correct\n cpp_goat_library.h\n cpp_goat_main.correct\n cpp_goat_main.cpp\n jump_to_conclusions_annotated.py\n py_goat_library.correct\n py_goat_library.py\n py_goat_library_annotated.py\n py_goat_main.py\n py_goat_main_annotated.py\n py_torture1_attr.py\n py_torture1_attr_annotated.py\n py_torture2_resolving.py\n py_torture2_resolving_annotated.py\n python_binding_and_cmdline/\n setup.py\n refact/\n __init__.py\n chat_client.py\n cli_app_switcher.py\n cli_export.py\n cli_inspect.py\n cli_main.py\n cli_markdown.py\n cli_printing.py\n cli_settings.py\n cli_statusbar.py\n cli_streaming.py\n lsp_runner.py\n traj_compressor.py\n examples/\n ast_definition.sh\n ast_references.sh\n chat_with_at_command.py\n http_caps.sh\n http_chat.sh\n http_chat_passthrough.sh\n http_completion.sh\n http_rag_status.sh\n http_subchat.sh\n http_vecdb_search.sh\n lsp_runner.py\n note3.py\n rag_skeletonize_video.py\n docker/\n lsp-debug.Dockerfile\n chrome/\n mac_arm-130.0.6723.69/\n chrome-mac-arm64/\n Google Chrome for Testing.app/\n Contents/\n Resources/\n 
com.google.chrome.for.testing.manifest/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest\n en.lproj/\n Localizable.strings\n Frameworks/\n Google Chrome for Testing Framework.framework/\n Versions/\n 130.0.6723.69/\n Libraries/\n WidevineCdm/\n _platform_specific/\n mac_arm64/\n libwidevinecdm.dylib\n bring_your_own_key/\n hf.yaml\n mixed.yaml\n openai.yaml\n openrouter.yaml", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_4we2wH5H50A2m6CIJqKbzYH8", + function: { + arguments: '{"paths": "Cargo.toml", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 0, + }, + { + id: "call_vmIGl31ytfpLWPkc138HJnxz", + function: { + arguments: '{"paths": "README.md", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 1, + }, + { + id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", + function: { + arguments: + '{"paths": "tests/emergency_frog_situation/frog.py", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 2, }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", + ftm_content: + "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", + }, + { + ftm_role: "tool", + ftm_call_id: "call_vmIGl31ytfpLWPkc138HJnxz", + ftm_content: + 'Path problems:\n\nThe path "README.md" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "tool", + ftm_call_id: "call_4we2wH5H50A2m6CIJqKbzYH8", + ftm_content: + 'Path problems:\n\nThe path "Cargo.toml" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ { - ftm_role: "tool", - ftm_call_id: "call_3wuMILBYl0z7pBMUoJOwohQf", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_github.rs" does not exist. There are no similar names either.\n', + file_name: "frog.py", + file_content: + 'import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n def croak(self, n_times):\n for n in range(n_times):\n print("croak")\n\n\nclass AlternativeFrog:\n def alternative_jump(self):\n pass\n\n\ndef standalone_jumping_function():\n print("I\'m a frog! Jump! Jump! 
Really!")\n', + line1: 1, + line2: 39, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_AzBRj8RiL1wc8eejuBVdaS6t", - function: { - arguments: - '{"paths":"src/integrations/integr_gitlab.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ]), + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_sNnvyOLlmulfHIikJzKhmz55", + function: { + arguments: '{"paths": "src/Cargo.toml", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 0, + }, + { + id: "call_n9FACVDl6a4tllgyWaLCl8s9", + function: { + arguments: '{"paths": "src/main.rs", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 1, + }, + { + id: "call_1DUkSfLq6Io8xowqVjJEuybM", + function: { + arguments: '{"paths": "src/lib.rs", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 2, }, - { - ftm_role: "tool", - ftm_call_id: "call_AzBRj8RiL1wc8eejuBVdaS6t", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_gitlab.rs" does not exist. There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_1DUkSfLq6Io8xowqVjJEuybM", + ftm_content: + 'Path problems:\n\nThe path "src/lib.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "tool", + ftm_call_id: "call_n9FACVDl6a4tllgyWaLCl8s9", + ftm_content: + 'Path problems:\n\nThe path "src/main.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "tool", + ftm_call_id: "call_sNnvyOLlmulfHIikJzKhmz55", + ftm_content: + 'Path problems:\n\nThe path "src/Cargo.toml" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_h8biZ8JNUjeWGbL9CCatbPcf", + function: { + arguments: '{"paths":"src/background_tasks.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", - function: { - arguments: - '{"paths":"src/integrations/integr_chrome.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_h8biZ8JNUjeWGbL9CCatbPcf", + ftm_content: + 'Path problems:\n\nThe path "src/background_tasks.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", + function: { + arguments: '{"paths":"src/tools/integr_postgres.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "tool", - ftm_call_id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_chrome.rs" does not exist. There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", + ftm_content: + 'Path problems:\n\nThe path "src/tools/integr_postgres.rs" does not exist. 
There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_RXDRwr3yCJEplHud4xRoxWlC", + function: { + arguments: + '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_kZFTzONZdyo11FQcSudIo3vK", - function: { - arguments: - '{"paths":"src/integrations/integr_pdb.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_RXDRwr3yCJEplHud4xRoxWlC", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_OvYAforNKAFz0d7mv96AnTVR", + function: { + arguments: + '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "tool", - ftm_call_id: "call_kZFTzONZdyo11FQcSudIo3vK", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_pdb.rs" does not exist. There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_OvYAforNKAFz0d7mv96AnTVR", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_3wuMILBYl0z7pBMUoJOwohQf", + function: { + arguments: + '{"paths":"src/integrations/integr_github.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_MDynldaxbGEuCKSuQg0Vgk5z", - function: { - arguments: - '{"paths":"src/integrations/integr_docker.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_3wuMILBYl0z7pBMUoJOwohQf", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_github.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_AzBRj8RiL1wc8eejuBVdaS6t", + function: { + arguments: + '{"paths":"src/integrations/integr_gitlab.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "tool", - ftm_call_id: "call_MDynldaxbGEuCKSuQg0Vgk5z", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_docker.rs" does not exist. There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_AzBRj8RiL1wc8eejuBVdaS6t", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_gitlab.rs" does not exist. 
There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", + function: { + arguments: + '{"paths":"src/integrations/integr_chrome.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_9fCGZwstx7G1MgHs6JD5JWTn", - function: { - arguments: - '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_chrome.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_kZFTzONZdyo11FQcSudIo3vK", + function: { + arguments: '{"paths":"src/integrations/integr_pdb.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "tool", - ftm_call_id: "call_9fCGZwstx7G1MgHs6JD5JWTn", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_kZFTzONZdyo11FQcSudIo3vK", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_pdb.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_MDynldaxbGEuCKSuQg0Vgk5z", + function: { + arguments: + '{"paths":"src/integrations/integr_docker.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_etmMcI1UwBSaWwZHzxsuL8xu", - function: { - arguments: - '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_MDynldaxbGEuCKSuQg0Vgk5z", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_docker.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_9fCGZwstx7G1MgHs6JD5JWTn", + function: { + arguments: + '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, - { - ftm_role: "tool", - ftm_call_id: "call_etmMcI1UwBSaWwZHzxsuL8xu", - ftm_content: - 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_9fCGZwstx7G1MgHs6JD5JWTn", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_etmMcI1UwBSaWwZHzxsuL8xu", + function: { + arguments: + '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, }, ], - title: - "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. 
If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", - model: "", - tool_use: "agent", - integration: { - name: "postgres", - path: "", - project: "", - }, - read: true, - new_chat_suggested: { - wasSuggested: false, - }, - createdAt: "2024-12-02T14:42:18.902Z", - updatedAt: "2024-12-02T14:42:18.902Z", - }, - error: null, - prevent_send: true, - waiting_for_response: false, - max_new_tokens: 4096, - cache: {}, - tool_use: "agent", - send_immediately: false, -}; + }, + { + ftm_role: "tool", + ftm_call_id: "call_etmMcI1UwBSaWwZHzxsuL8xu", + ftm_content: + 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', + }, +].map((message, index) => { + return { + ftm_belongs_to_ft_id: "test", + ftm_num: index, + ftm_alt: 100, + ftm_prev_alt: 100, + ftm_created_ts: Date.now(), + ftm_call_id: "", + ...message, + }; +}); diff --git a/refact-agent/gui/src/__fixtures__/chat_links_response.ts b/refact-agent/gui/src/__fixtures__/chat_links_response.ts index 3f9a62f9e..bd988708a 100644 --- a/refact-agent/gui/src/__fixtures__/chat_links_response.ts +++ b/refact-agent/gui/src/__fixtures__/chat_links_response.ts @@ -42,11 +42,14 @@ export const STUB_LINKS_FOR_CHAT_RESPONSE: LinksForChatResponse = { "/Users/kot/code_aprojects/demotest/.refact/project_summary.yaml", }, messages: [ - { - ftm_role: "user", - ftm_content: - "Make recommended_integrations an empty list, follow the system prompt.", - }, + // { + // ftm_role: "user", + // ftm_content: + // "Make recommended_integrations an empty list, follow the system prompt.", + // ftm_alt: 100, + // ftm_num: 1, + // ftm_prev_alt: 100, + // }, ], }, }, diff --git a/refact-agent/gui/src/__fixtures__/chat_textdoc.ts b/refact-agent/gui/src/__fixtures__/chat_textdoc.ts index e6e43a758..ca688c93c 100644 --- a/refact-agent/gui/src/__fixtures__/chat_textdoc.ts +++ b/refact-agent/gui/src/__fixtures__/chat_textdoc.ts @@ -1,1094 +1,1090 @@ -/*eslint no-irregular-whitespace: ["error", { "skipComments": true }]*/ +import type { BaseMessage } from "../services/refact/types"; -import type { ChatThread } from "../features/Chat/Thread"; -export const CHAT_WITH_TEXTDOC: ChatThread = { - id: "754565e2-8efd-469b-a9bf-1414ce566ff2", - new_chat_suggested: { wasSuggested: false }, - messages: [ - { - ftm_role: "system", - ftm_content: - "[mode3] You are Refact Agent, an autonomous bot for coding tasks.\n\nCore Principles\n1. Use knowledge()\n - Always use knowledge() first when you encounter an agentic (complex) task.\n - This tool can access external data, including successful “trajectories” (examples of past solutions).\n - External database records begin with the icon “🗃️” followed by a record identifier.\n - Use these records to help solve your tasks by analogy.\n2. Use locate() with the Full Problem Statement\n - Provide the entire user request in the problem_statement argument to avoid losing any details (“telephone game” effect).\n - Include user’s emotional stance, code snippets, formatting, instructions—everything word-for-word.\n - Only omit parts of the user’s request if they are unrelated to the final solution.\n - Avoid using locate() if the problem is quite simple and can be solved without extensive project analysis.\n\nAnswering Strategy\n1. If the user’s question is unrelated to the project\n - Answer directly without using any special calls.\n2. 
If the user’s question is related to the project\n - First, call knowledge() for relevant information and best practices.\n3. Making Changes\n - If a solution requires file changes, use `*_textdoc()` tools.\n - It's a good practice to call cat() to track changes for changed files.\n\nImportant Notes\n1. Parallel Exploration\n - When you explore different ideas, use multiple parallel methods.\n2. Project-Related Questions\n - For any project question, always call knowledge() before taking any action.\n\nWhen running on user's laptop, you most likely have the shell() tool. It's for one-time dependency installations, or doing whatever\nuser is asking you to do. Tools the user can set up are better, because they don't require confimations when running on a laptop.\nWhen doing something typical for the project, offer the user to make a cmdline_* tool after you have run it.\nYou can do this by writing:\n\n🧩SETTINGS:cmdline_cargo_check\n\nfrom a new line, that will open (when clicked) a wizard that creates `cargo check` (in this example) command line tool.\n\nIn a similar way, service_* tools work. The difference is cmdline_* is designed for non-interactive blocking commands that immediately\nreturn text in stdout/stderr, and service_* is designed for blocking background commands, such as hypercorn server that runs forever until you hit Ctrl+C.\nHere is another example:\n\n🧩SETTINGS:service_hypercorn\n\n\nYou might receive additional instructions that start with 💿. Those are not coming from the user, they are programmed to help you operate\nwell and they are always in English. Answer in the language the user has asked the question.\n\n\n- below general information about the current project -\n\nThe current IDE workspace has these project directories:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation\n\nThere is no active file currently open in the IDE.\nThe project is under git version control, located at:\n/Users/marc/Projects/refact-lsp\n\n\n", - }, - { - ftm_role: "user", - ftm_content: "Create tests for frog\n", - checkpoints: [ - { - workspace_folder: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", - commit_hash: "ae0970ff3eca36d1867466847ac876d747357668", - }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "I'll help create tests for the frog-related code. Let me first understand the project structure and content.", - ftm_tool_calls: [ - { - id: "toolu_01XVhkyaDunsy4fPrDqy3toa", - function: { - arguments: - '{"goal": "Create tests for frog-related code", "language_slash_framework": "rust/tokio", "im_going_to_use_tools": "cat, tree", "im_going_to_apply_to": "emergency_frog_situation, tests"}', - name: "knowledge", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01XVhkyaDunsy4fPrDqy3toa", - ftm_content: - "🗃️e19af1e7b3\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. 
Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n\n🗃️d84f5c4a7c\nAdditional instructions for django web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and locate(), looking into relevant files using cat(). If you see reference designs and sketches, read them using cat()\n2. Start django server\n3. Navigate to the place on the website that user wants to change, make a screenshot to make sure you understand what exactly needs to change\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it.\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. 
The task is complete if all the elements\nare at the right place.\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE YOU HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n🗃️ae3f1228bd\n[\n[\"goal\", \"Rename all occurrences of 'frog' to 'bird' in the project\"],\n[\"tree(use_ast=true)\", \"Found emergency_frog_situation/ with index.html, holiday.py, work_day.py, game.js, jump_to_conclusions.py, bird.py, set_as_avatar.py\"],\n[\"search(query='frog', scope='workspace')\", \"Found frog references in work_day.py (imports, function), jump_to_conclusions.py (imports, class usage), bird.py already has Bird class\"],\n[\"thinking\", \"bird.py already has Bird class and set_as_avatar.py uses it, so we need to update work_day.py and jump_to_conclusions.py to use the existing Bird class\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 001 'work_day.py' changed import frog->bird, bring_your_own_frog->bring_your_own_bird, frog.Frog->bird.Bird\"],\n[\"patch(tickets='001', path='tests/emergency_frog_situation/work_day.py')\", \"3 chunks applied: import change, function rename, type annotation update\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 002 'jump_to_conclusions.py' changed import frog->bird, draw_hello_frog->draw_hello_bird, all frog.Frog->bird.Bird\"],\n[\"patch(tickets='002', path='tests/emergency_frog_situation/jump_to_conclusions.py')\", \"5 chunks applied: import, function rename, constructor call, type annotation, function call\"],\n[\"outcome\", \"SUCCESS\"]\n]\n\n🗃️2b684b6e70\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. 
You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nLet me check the project structure to understand what we're working with.", - ftm_tool_calls: [ - { - id: "toolu_01HMyLgKsLQURM9vgd3vQKXN", - function: { - arguments: '{"use_ast": true}', - name: "tree", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01HMyLgKsLQURM9vgd3vQKXN", - ftm_content: - "/\n home/\n svakhreev/\n projects/\n refact-lsp/\n tests/\n emergency_frog_situation/\n holiday.py\n work_day.py\n __pycache__/\n frog.cpython-310.pyc\n frog.py\n jump_to_conclusions.py\n set_as_avatar.py", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nLet me examine the frog-related files to understand what we need to test.", - ftm_tool_calls: [ - { - id: "toolu_0136dUTkih5ES8rrzHa5B5ep", - function: { - arguments: - '{"paths": "tests/emergency_frog_situation/frog.py", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_0136dUTkih5ES8rrzHa5B5ep", - ftm_content: - "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "frog.py", - file_content: - 'import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n def croak(self, n_times):\n for n in range(n_times):\n print("croak")\n\n\nclass AlternativeFrog:\n def alternative_jump(self):\n pass\n\n\ndef standalone_jumping_function():\n print("I\'m a frog! Jump! Jump! Really!")\n', - line1: 1, - line2: 39, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "I'll create tests for the Frog class and its related functionality. We'll need to test the initialization, bouncing behavior, jumping mechanics, and the croak method. 
Let me create a test file using pytest.", - ftm_tool_calls: [ - { - id: "toolu_015isrQXYKSQ4XYqECkXis3J", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "content": "import pytest\\nimport numpy as np\\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\\nfrom unittest.mock import patch\\nimport io\\nimport sys\\n\\ndef test_frog_initialization():\\n frog = Frog(1.0, 2.0, 3.0, 4.0)\\n assert frog.x == 1.0\\n assert frog.y == 2.0\\n assert frog.vx == 3.0\\n assert frog.vy == 4.0\\n\\nclass TestFrogBouncing:\\n def test_bounce_off_left_bank(self):\\n frog = Frog(-1.0, 5.0, -2.0, 0.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vx > 0 # Should reverse horizontal velocity\\n\\n def test_bounce_off_right_bank(self):\\n frog = Frog(11.0, 5.0, 2.0, 0.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vx < 0 # Should reverse horizontal velocity\\n\\n def test_bounce_off_bottom(self):\\n frog = Frog(5.0, -1.0, 0.0, -2.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vy > 0 # Should reverse vertical velocity\\n\\n def test_bounce_off_top(self):\\n frog = Frog(5.0, 11.0, 0.0, 2.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vy < 0 # Should reverse vertical velocity\\n\\n def test_no_bounce_within_bounds(self):\\n frog = Frog(5.0, 5.0, 1.0, 1.0)\\n original_vx, original_vy = frog.vx, frog.vy\\n frog.bounce_off_banks(10, 10)\\n assert frog.vx == original_vx\\n assert frog.vy == original_vy\\n\\nclass TestFrogJumping:\\n def test_normal_jump(self):\\n frog = Frog(5.0, 5.0, 1.0, 1.0)\\n expected_x = 5.0 + 1.0 * DT\\n expected_y = 5.0 + 1.0 * DT\\n frog.jump(10, 10)\\n assert np.isclose(frog.x, expected_x)\\n assert np.isclose(frog.y, expected_y)\\n\\n def test_jump_with_boundary_clipping(self):\\n frog = Frog(9.9, 9.9, 10.0, 10.0)\\n frog.jump(10, 10)\\n assert frog.x == 10.0 # Should be clipped to pond width\\n assert frog.y == 10.0 # Should be clipped to pond height\\n\\n def test_jump_with_negative_boundary_clipping(self):\\n frog = Frog(0.1, 0.1, -10.0, -10.0)\\n frog.jump(10, 10)\\n assert frog.x == 0.0 # Should be clipped to minimum\\n assert frog.y == 0.0 # Should be clipped to minimum\\n\\ndef test_croak():\\n frog = Frog(0, 0, 0, 0)\\n # Capture stdout\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n frog.croak(3)\\n \\n # Restore stdout\\n sys.stdout = sys.__stdout__\\n \\n output = captured_output.getvalue().strip()\\n expected_output = \\"croak\\\\ncroak\\\\ncroak\\"\\n assert output == expected_output\\n\\ndef test_alternative_frog():\\n alt_frog = AlternativeFrog()\\n # Just verify it can be instantiated and method exists\\n alt_frog.alternative_jump()\\n\\ndef test_standalone_jumping_function():\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n standalone_jumping_function()\\n \\n sys.stdout = sys.__stdout__\\n \\n output = captured_output.getvalue().strip()\\n assert output == \\"I\'m a frog! Jump! Jump! 
Really!\\""}', - name: "create_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - 'import pytest\nimport numpy as np\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\nfrom unittest.mock import patch\nimport io\nimport sys\n\ndef test_frog_initialization():\n frog = Frog(1.0, 2.0, 3.0, 4.0)\n assert frog.x == 1.0\n assert frog.y == 2.0\n assert frog.vx == 3.0\n assert frog.vy == 4.0\n\nclass TestFrogBouncing:\n def test_bounce_off_left_bank(self):\n frog = Frog(-1.0, 5.0, -2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx > 0 # Should reverse horizontal velocity\n\n def test_bounce_off_right_bank(self):\n frog = Frog(11.0, 5.0, 2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx < 0 # Should reverse horizontal velocity\n\n def test_bounce_off_bottom(self):\n frog = Frog(5.0, -1.0, 0.0, -2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy > 0 # Should reverse vertical velocity\n\n def test_bounce_off_top(self):\n frog = Frog(5.0, 11.0, 0.0, 2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy < 0 # Should reverse vertical velocity\n\n def test_no_bounce_within_bounds(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n original_vx, original_vy = frog.vx, frog.vy\n frog.bounce_off_banks(10, 10)\n assert frog.vx == original_vx\n assert frog.vy == original_vy\n\nclass TestFrogJumping:\n def test_normal_jump(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n expected_x = 5.0 + 1.0 * DT\n expected_y = 5.0 + 1.0 * DT\n frog.jump(10, 10)\n assert np.isclose(frog.x, expected_x)\n assert np.isclose(frog.y, expected_y)\n\n def test_jump_with_boundary_clipping(self):\n frog = Frog(9.9, 9.9, 10.0, 10.0)\n frog.jump(10, 10)\n assert frog.x == 10.0 # Should be clipped to pond width\n assert frog.y == 10.0 # Should be clipped to pond height\n\n def test_jump_with_negative_boundary_clipping(self):\n frog = Frog(0.1, 0.1, -10.0, -10.0)\n frog.jump(10, 10)\n assert frog.x == 0.0 # Should be clipped to minimum\n assert frog.y == 0.0 # Should be clipped to minimum\n\ndef test_croak():\n frog = Frog(0, 0, 0, 0)\n # Capture stdout\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n frog.croak(3)\n \n # Restore stdout\n sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n expected_output = "croak\\ncroak\\ncroak"\n assert output == expected_output\n\ndef test_alternative_frog():\n alt_frog = AlternativeFrog()\n # Just verify it can be instantiated and method exists\n alt_frog.alternative_jump()\n\ndef test_standalone_jumping_function():\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n standalone_jumping_function()\n \n sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n assert output == "I\'m a frog! Jump! Jump! Really!"\n', - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_015isrQXYKSQ4XYqECkXis3J", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nI've created a comprehensive test suite for the frog module. Here's what the tests cover:\n\n1. Basic Initialization\n- Tests that a Frog object is correctly initialized with given coordinates and velocities\n\n2. 
Bouncing Behavior (TestFrogBouncing class)\n- Tests bouncing off all four boundaries (left, right, top, bottom)\n- Tests that velocity is reversed appropriately when hitting boundaries\n- Tests that no bouncing occurs when the frog is within bounds\n\n3. Jumping Mechanics (TestFrogJumping class)\n- Tests normal jumping behavior with velocity and time step (DT)\n- Tests boundary clipping when jumping would exceed pond dimensions\n- Tests clipping at both maximum and minimum boundaries\n\n4. Croaking\n- Tests that the croak method outputs the correct number of \"croak\" messages\n\n5. Additional Components\n- Tests for AlternativeFrog class (basic instantiation and method existence)\n- Tests for the standalone_jumping_function\n\nThe tests use pytest and include proper handling of stdout capture for testing print statements. They also use numpy's isclose() for floating-point comparisons where appropriate.\n\nTo run these tests, you would need to have pytest installed and can run them using:\n```bash\npytest test_frog.py -v\n```\n\nThe tests are designed to be thorough while remaining readable and maintainable. Each test has a clear purpose and includes comments where necessary to explain the expected behavior.\n\nWould you like me to add any additional test cases or modify the existing ones?", - finish_reason: "stop", - }, - { - ftm_role: "user", - ftm_content: "Replace generated tests with well documented tests\n", - checkpoints: [ - { - workspace_folder: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", - commit_hash: "b911227ed17022f6d828cb54a130f08c50273484", - }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "I'll create a new version of the tests with comprehensive documentation and clear test organization.", - ftm_tool_calls: [ - { - id: "toolu_019L5hGRf36DCj9q4BQfofBV", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "replacement": "\\"\\"\\"\\nTests for the Frog simulation module.\\n\\nThis test suite verifies the behavior of a frog in a bounded pond environment.\\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\\n\\nKey components tested:\\n- Frog initialization and state\\n- Boundary detection and bouncing behavior\\n- Movement and position updates\\n- Sound production (croaking)\\n\\"\\"\\"\\n\\nimport pytest\\nimport numpy as np\\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\\nimport io\\nimport sys\\n\\n\\n@pytest.fixture\\ndef basic_frog():\\n \\"\\"\\"\\n Provides a basic frog instance for testing.\\n \\n Returns:\\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\\n \\"\\"\\"\\n return Frog(5.0, 5.0, 1.0, 1.0)\\n\\n\\n@pytest.fixture\\ndef pond_dimensions():\\n \\"\\"\\"\\n Provides standard pond dimensions for testing.\\n \\n Returns:\\n tuple: (width, height) of the pond, both set to 10 units\\n \\"\\"\\"\\n return (10.0, 10.0)\\n\\n\\ndef test_frog_initialization():\\n \\"\\"\\"\\n Verify that a frog is correctly initialized with given parameters.\\n \\n Tests:\\n - Position coordinates (x, y)\\n - Velocity components (vx, vy)\\n \\"\\"\\"\\n x, y = 1.0, 2.0\\n vx, vy = 3.0, 4.0\\n frog = Frog(x, y, vx, vy)\\n \\n assert frog.x == x, f\\"Expected x-position to be {x}\\"\\n assert frog.y == y, f\\"Expected y-position to be {y}\\"\\n assert frog.vx == vx, f\\"Expected x-velocity to be {vx}\\"\\n assert frog.vy == vy, f\\"Expected y-velocity to be {vy}\\"\\n\\n\\nclass TestBoundaryBehavior:\\n 
\\"\\"\\"Tests for frog\'s interaction with pond boundaries.\\"\\"\\"\\n\\n @pytest.mark.parametrize(\\"test_case\\", [\\n # (starting_pos, starting_vel, expected_vel, description)\\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), \\"left boundary\\"),\\n ((11.0, 5.0), (2.0, 0.0), (-2.0, 0.0), \\"right boundary\\"),\\n ((5.0, -1.0), (0.0, -2.0), (0.0, 2.0), \\"bottom boundary\\"),\\n ((5.0, 11.0), (0.0, 2.0), (0.0, -2.0), \\"top boundary\\")\\n ])\\n def test_boundary_bouncing(self, test_case, pond_dimensions):\\n \\"\\"\\"\\n Test bouncing behavior at all pond boundaries.\\n \\n Verifies that velocity is correctly reversed when the frog hits a boundary.\\n \\n Args:\\n test_case: Tuple containing test parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"\\n pos, vel, expected, boundary = test_case\\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\\n \\n frog.bounce_off_banks(*pond_dimensions)\\n \\n assert (frog.vx, frog.vy) == expected, \\\\\\n f\\"Incorrect velocity after bouncing off {boundary}\\"\\n\\n def test_no_bounce_within_bounds(self, basic_frog, pond_dimensions):\\n \\"\\"\\"\\n Verify that no velocity changes occur when frog is within pond boundaries.\\n \\"\\"\\"\\n original_vx, original_vy = basic_frog.vx, basic_frog.vy\\n \\n basic_frog.bounce_off_banks(*pond_dimensions)\\n \\n assert (basic_frog.vx, basic_frog.vy) == (original_vx, original_vy), \\\\\\n \\"Velocity should not change when frog is within bounds\\"\\n\\n\\nclass TestMovement:\\n \\"\\"\\"Tests for frog\'s movement mechanics.\\"\\"\\"\\n\\n def test_normal_jump(self, basic_frog, pond_dimensions):\\n \\"\\"\\"\\n Test standard jumping motion within pond boundaries.\\n \\n Verifies that position is updated correctly according to:\\n new_position = old_position + velocity * time_step\\n \\"\\"\\"\\n initial_x, initial_y = basic_frog.x, basic_frog.y\\n expected_x = initial_x + basic_frog.vx * DT\\n expected_y = initial_y + basic_frog.vy * DT\\n \\n basic_frog.jump(*pond_dimensions)\\n \\n assert np.isclose(basic_frog.x, expected_x), \\\\\\n \\"Incorrect x-position after jump\\"\\n assert np.isclose(basic_frog.y, expected_y), \\\\\\n \\"Incorrect y-position after jump\\"\\n\\n @pytest.mark.parametrize(\\"test_case\\", [\\n # (pos, vel, expected_pos, description)\\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), \\"upper bounds\\"),\\n ((0.1, 0.1), (-10.0, -10.0), (0.0, 0.0), \\"lower bounds\\")\\n ])\\n def test_boundary_clipping(self, test_case, pond_dimensions):\\n \\"\\"\\"\\n Test position clipping at pond boundaries.\\n \\n Verifies that frog\'s position is properly constrained within pond limits.\\n \\n Args:\\n test_case: Tuple containing test parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"\\n pos, vel, expected, bounds = test_case\\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\\n \\n frog.jump(*pond_dimensions)\\n \\n assert (frog.x, frog.y) == expected, \\\\\\n f\\"Incorrect position clipping at {bounds}\\"\\n\\n\\ndef test_croak_output():\\n \\"\\"\\"\\n Test frog\'s croaking behavior.\\n \\n Verifies that:\\n - Correct number of croaks are produced\\n - Each croak is on a new line\\n - Output format matches expectations\\n \\"\\"\\"\\n frog = Frog(0, 0, 0, 0)\\n n_croaks = 3\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n try:\\n frog.croak(n_croaks)\\n output = captured_output.getvalue().strip()\\n expected_output = \\"croak\\\\n\\" * n_croaks\\n finally:\\n sys.stdout = sys.__stdout__\\n \\n assert output == expected_output.strip(), 
\\\\\\n f\\"Expected {n_croaks} croaks, got: {output}\\"\\n\\n\\ndef test_alternative_frog_existence():\\n \\"\\"\\"\\n Verify the existence and basic structure of AlternativeFrog class.\\n \\n This is a structural test to ensure the alternative implementation\\n maintains the expected interface.\\n \\"\\"\\"\\n alt_frog = AlternativeFrog()\\n assert hasattr(alt_frog, \'alternative_jump\'), \\\\\\n \\"AlternativeFrog missing alternative_jump method\\"\\n alt_frog.alternative_jump() # Should not raise any exceptions\\n\\n\\ndef test_standalone_jumping_function():\\n \\"\\"\\"\\n Test the standalone jumping function\'s output.\\n \\n Verifies the exact string output of the function, including punctuation\\n and spacing.\\n \\"\\"\\"\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n try:\\n standalone_jumping_function()\\n output = captured_output.getvalue().strip()\\n finally:\\n sys.stdout = sys.__stdout__\\n \\n expected = \\"I\'m a frog! Jump! Jump! Really!\\"\\n assert output == expected, \\\\\\n f\\"Unexpected output: {output}\\""}', - name: "replace_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - '"""\nTests for the Frog simulation module.\n\nThis test suite verifies the behavior of a frog in a bounded pond environment.\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\n\nKey components tested:\n- Frog initialization and state\n- Boundary detection and bouncing behavior\n- Movement and position updates\n- Sound production (croaking)\n"""\n\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 4, - line2: 5, - lines_remove: "from unittest.mock import patch\n", - lines_add: "", - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 8, - line2: 8, - lines_remove: "", - lines_add: - '\n@pytest.fixture\ndef basic_frog():\n """\n Provides a basic frog instance for testing.\n \n Returns:\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\n """\n return Frog(5.0, 5.0, 1.0, 1.0)\n\n\n@pytest.fixture\ndef pond_dimensions():\n """\n Provides standard pond dimensions for testing.\n \n Returns:\n tuple: (width, height) of the pond, both set to 10 units\n """\n return (10.0, 10.0)\n\n\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 9, - line2: 14, - lines_remove: - " frog = Frog(1.0, 2.0, 3.0, 4.0)\n assert frog.x == 1.0\n assert frog.y == 2.0\n assert frog.vx == 3.0\n assert frog.vy == 4.0\n", - lines_add: - ' """\n Verify that a frog is correctly initialized with given parameters.\n \n Tests:\n - Position coordinates (x, y)\n - Velocity components (vx, vy)\n """\n x, y = 1.0, 2.0\n vx, vy = 3.0, 4.0\n frog = Frog(x, y, vx, vy)\n \n assert frog.x == x, f"Expected x-position to be {x}"\n assert frog.y == y, f"Expected y-position to be {y}"\n assert frog.vx == vx, f"Expected x-velocity to be {vx}"\n assert frog.vy == vy, f"Expected y-velocity to be {vy}"\n', - 
file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 15, - line2: 20, - lines_remove: - "class TestFrogBouncing:\n def test_bounce_off_left_bank(self):\n frog = Frog(-1.0, 5.0, -2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx > 0 # Should reverse horizontal velocity\n", - lines_add: "", - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 21, - line2: 25, - lines_remove: - " def test_bounce_off_right_bank(self):\n frog = Frog(11.0, 5.0, 2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx < 0 # Should reverse horizontal velocity\n", - lines_add: - 'class TestBoundaryBehavior:\n """Tests for frog\'s interaction with pond boundaries."""\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 26, - line2: 30, - lines_remove: - " def test_bounce_off_bottom(self):\n frog = Frog(5.0, -1.0, 0.0, -2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy > 0 # Should reverse vertical velocity\n", - lines_add: - ' @pytest.mark.parametrize("test_case", [\n # (starting_pos, starting_vel, expected_vel, description)\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n ((11.0, 5.0), (2.0, 0.0), (-2.0, 0.0), "right boundary"),\n ((5.0, -1.0), (0.0, -2.0), (0.0, 2.0), "bottom boundary"),\n ((5.0, 11.0), (0.0, 2.0), (0.0, -2.0), "top boundary")\n ])\n def test_boundary_bouncing(self, test_case, pond_dimensions):\n """\n Test bouncing behavior at all pond boundaries.\n \n Verifies that velocity is correctly reversed when the frog hits a boundary.\n \n Args:\n test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n pos, vel, expected, boundary = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.bounce_off_banks(*pond_dimensions)\n \n assert (frog.vx, frog.vy) == expected, \\\n f"Incorrect velocity after bouncing off {boundary}"\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 31, - line2: 35, - lines_remove: - " def test_bounce_off_top(self):\n frog = Frog(5.0, 11.0, 0.0, 2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy < 0 # Should reverse vertical velocity\n", - lines_add: - ' def test_no_bounce_within_bounds(self, basic_frog, pond_dimensions):\n """\n Verify that no velocity changes occur when frog is within pond boundaries.\n """\n original_vx, original_vy = basic_frog.vx, basic_frog.vy\n \n basic_frog.bounce_off_banks(*pond_dimensions)\n \n assert (basic_frog.vx, basic_frog.vy) == (original_vx, original_vy), \\\n "Velocity should not change when frog is within bounds"\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 36, - line2: 42, - lines_remove: - " def test_no_bounce_within_bounds(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n original_vx, original_vy = frog.vx, frog.vy\n frog.bounce_off_banks(10, 10)\n assert frog.vx == original_vx\n assert frog.vy == original_vy\n", - lines_add: "", - file_name_rename: null, - application_details: 
"", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 43, - line2: 51, - lines_remove: - "class TestFrogJumping:\n def test_normal_jump(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n expected_x = 5.0 + 1.0 * DT\n expected_y = 5.0 + 1.0 * DT\n frog.jump(10, 10)\n assert np.isclose(frog.x, expected_x)\n assert np.isclose(frog.y, expected_y)\n", - lines_add: - 'class TestMovement:\n """Tests for frog\'s movement mechanics."""\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 52, - line2: 57, - lines_remove: - " def test_jump_with_boundary_clipping(self):\n frog = Frog(9.9, 9.9, 10.0, 10.0)\n frog.jump(10, 10)\n assert frog.x == 10.0 # Should be clipped to pond width\n assert frog.y == 10.0 # Should be clipped to pond height\n", - lines_add: - ' def test_normal_jump(self, basic_frog, pond_dimensions):\n """\n Test standard jumping motion within pond boundaries.\n \n Verifies that position is updated correctly according to:\n new_position = old_position + velocity * time_step\n """\n initial_x, initial_y = basic_frog.x, basic_frog.y\n expected_x = initial_x + basic_frog.vx * DT\n expected_y = initial_y + basic_frog.vy * DT\n \n basic_frog.jump(*pond_dimensions)\n \n assert np.isclose(basic_frog.x, expected_x), \\\n "Incorrect x-position after jump"\n assert np.isclose(basic_frog.y, expected_y), \\\n "Incorrect y-position after jump"\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 58, - line2: 63, - lines_remove: - " def test_jump_with_negative_boundary_clipping(self):\n frog = Frog(0.1, 0.1, -10.0, -10.0)\n frog.jump(10, 10)\n assert frog.x == 0.0 # Should be clipped to minimum\n assert frog.y == 0.0 # Should be clipped to minimum\n", - lines_add: - ' @pytest.mark.parametrize("test_case", [\n # (pos, vel, expected_pos, description)\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n ((0.1, 0.1), (-10.0, -10.0), (0.0, 0.0), "lower bounds")\n ])\n def test_boundary_clipping(self, test_case, pond_dimensions):\n """\n Test position clipping at pond boundaries.\n \n Verifies that frog\'s position is properly constrained within pond limits.\n \n Args:\n test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n pos, vel, expected, bounds = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.jump(*pond_dimensions)\n \n assert (frog.x, frog.y) == expected, \\\n f"Incorrect position clipping at {bounds}"\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 64, - line2: 65, - lines_remove: "def test_croak():\n", - lines_add: - '\ndef test_croak_output():\n """\n Test frog\'s croaking behavior.\n \n Verifies that:\n - Correct number of croaks are produced\n - Each croak is on a new line\n - Output format matches expectations\n """\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 66, - line2: 67, - lines_remove: " # Capture stdout\n", - lines_add: " n_croaks = 3\n", - file_name_rename: null, - 
application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 70, - line2: 71, - lines_remove: " frog.croak(3)\n", - lines_add: - ' try:\n frog.croak(n_croaks)\n output = captured_output.getvalue().strip()\n expected_output = "croak\\n" * n_croaks\n finally:\n sys.stdout = sys.__stdout__\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 72, - line2: 78, - lines_remove: - ' # Restore stdout\n sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n expected_output = "croak\\ncroak\\ncroak"\n assert output == expected_output\n', - lines_add: - ' assert output == expected_output.strip(), \\\n f"Expected {n_croaks} croaks, got: {output}"\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 79, - line2: 80, - lines_remove: "def test_alternative_frog():\n", - lines_add: - '\ndef test_alternative_frog_existence():\n """\n Verify the existence and basic structure of AlternativeFrog class.\n \n This is a structural test to ensure the alternative implementation\n maintains the expected interface.\n """\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 81, - line2: 83, - lines_remove: - " # Just verify it can be instantiated and method exists\n alt_frog.alternative_jump()\n", - lines_add: - " assert hasattr(alt_frog, 'alternative_jump'), \\\n \"AlternativeFrog missing alternative_jump method\"\n alt_frog.alternative_jump() # Should not raise any exceptions\n", - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 84, - line2: 84, - lines_remove: "", - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 85, - line2: 85, - lines_remove: "", - lines_add: - ' """\n Test the standalone jumping function\'s output.\n \n Verifies the exact string output of the function, including punctuation\n and spacing.\n """\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 88, - line2: 89, - lines_remove: " standalone_jumping_function()\n", - lines_add: - " try:\n standalone_jumping_function()\n output = captured_output.getvalue().strip()\n finally:\n sys.stdout = sys.__stdout__\n", - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 90, - line2: 94, - lines_remove: - ' sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n assert output == "I\'m a frog! Jump! Jump! Really!"\n', - lines_add: - ' expected = "I\'m a frog! Jump! Jump! 
Really!"\n assert output == expected, \\\n f"Unexpected output: {output}"\n', - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_019L5hGRf36DCj9q4BQfofBV", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nI've completely rewritten the tests with comprehensive documentation and better organization. Here are the key improvements:\n\n1. Added Module-Level Documentation\n- Clear description of what the test suite covers\n- Key components being tested\n- Overall structure\n\n2. Added Fixtures\n- `basic_frog`: Provides a standard frog instance for testing\n- `pond_dimensions`: Provides standard pond dimensions\n- Both fixtures are well-documented with docstrings\n\n3. Organized Tests into Logical Classes\n- `TestBoundaryBehavior`: Tests for boundary interactions\n- `TestMovement`: Tests for movement mechanics\n\n4. Improved Test Documentation\n- Each test function has a detailed docstring\n- Clear description of what is being tested\n- Expected behavior and conditions\n- Any special considerations\n\n5. Better Test Structure\n- Used parametrized tests where appropriate to reduce code duplication\n- Clear separation of setup, action, and assertion phases\n- Meaningful assertion messages\n\n6. Improved Error Handling\n- Proper cleanup of system resources (stdout capture)\n- Better error messages with context\n- Use of try/finally blocks where appropriate\n\n7. Better Test Names\n- More descriptive test names that indicate what's being tested\n- Consistent naming convention\n\nKey Features:\n1. Boundary Testing\n- Comprehensive tests for all pond boundaries\n- Tests for both position and velocity changes\n- Clear error messages for boundary violations\n\n2. Movement Testing\n- Precise testing of movement calculations\n- Boundary clipping verification\n- Multiple test cases for different scenarios\n\n3. Output Testing\n- Proper capture and verification of stdout\n- Exact string matching for croaks and messages\n- Resource cleanup after stdout capture\n\n4. Structure Testing\n- Verification of alternative implementations\n- Interface consistency checks\n- Existence of required methods\n\nThe tests are now more maintainable, readable, and provide better feedback when failures occur. Each test has a clear purpose and includes proper documentation about what it's testing and why.\n\nWould you like me to explain any particular part of the test suite in more detail?", - finish_reason: "stop", - }, - { - ftm_role: "user", - ftm_content: - "Great! 
Update the document and remove all docstrings one by one", - checkpoints: [ - { - workspace_folder: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", - commit_hash: "0609d3823dd9e8ac84674cefb5dcf5d0b691e890", - }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "I'll help you remove the docstrings one by one from the test file while preserving the functionality.", - ftm_tool_calls: [ - { - id: "toolu_01KmJrYK6xn2XqGLXpFA5Tgp", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": "\\"\\"\\"\\nTests for the Frog simulation module.\\n\\nThis test suite verifies the behavior of a frog in a bounded pond environment.\\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\\n\\nKey components tested:\\n- Frog initialization and state\\n- Boundary detection and bouncing behavior\\n- Movement and position updates\\n- Sound production (croaking)\\n\\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 1, - line2: 3, - lines_remove: '"""\nTests for the Frog simulation module.\n', - lines_add: "", - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 4, - line2: 13, - lines_remove: - 'This test suite verifies the behavior of a frog in a bounded pond environment.\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\n\nKey components tested:\n- Frog initialization and state\n- Boundary detection and bouncing behavior\n- Movement and position updates\n- Sound production (croaking)\n"""\n', - lines_add: "", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01KmJrYK6xn2XqGLXpFA5Tgp", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01Smt5a61QbmbmZHpbNh7BKK", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Provides a basic frog instance for testing.\\n \\n Returns:\\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 12, - line2: 18, - lines_remove: - ' """\n Provides a basic frog instance for testing.\n \n Returns:\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01Smt5a61QbmbmZHpbNh7BKK", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_0168Gio2kBMYhzKe6bdxErkb", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Provides standard pond dimensions for testing.\\n \\n Returns:\\n tuple: (width, height) of the pond, both set to 10 units\\n \\"\\"\\"", "replacement": "", 
"multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 18, - line2: 24, - lines_remove: - ' """\n Provides standard pond dimensions for testing.\n \n Returns:\n tuple: (width, height) of the pond, both set to 10 units\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_0168Gio2kBMYhzKe6bdxErkb", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_016ChnUcGQGrjfbx2dSzmmeR", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Verify that a frog is correctly initialized with given parameters.\\n \\n Tests:\\n - Position coordinates (x, y)\\n - Velocity components (vx, vy)\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 23, - line2: 30, - lines_remove: - ' """\n Verify that a frog is correctly initialized with given parameters.\n \n Tests:\n - Position coordinates (x, y)\n - Velocity components (vx, vy)\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_016ChnUcGQGrjfbx2dSzmmeR", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01Fzi693dL7aKwE9gLFetqiN", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"Tests for frog\'s interaction with pond boundaries.\\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 35, - line2: 36, - lines_remove: - ' """Tests for frog\'s interaction with pond boundaries."""\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01Fzi693dL7aKwE9gLFetqiN", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01Xtt2L5ctWViSFce6JcNR2Z", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test bouncing behavior at all pond boundaries.\\n \\n Verifies that velocity is correctly reversed when the frog hits a boundary.\\n \\n Args:\\n test_case: Tuple containing test parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 45, - line2: 54, - lines_remove: - ' """\n Test bouncing behavior at all pond boundaries.\n \n Verifies that velocity is correctly reversed when the frog hits a boundary.\n \n Args:\n 
test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01Xtt2L5ctWViSFce6JcNR2Z", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01WoEFVoJi9wSp2XgTx5PCUo", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Verify that no velocity changes occur when frog is within pond boundaries.\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 55, - line2: 58, - lines_remove: - ' """\n Verify that no velocity changes occur when frog is within pond boundaries.\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01WoEFVoJi9wSp2XgTx5PCUo", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_017YWfgyTtHRLZjm3hrfCTBK", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"Tests for frog\'s movement mechanics.\\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 65, - line2: 66, - lines_remove: ' """Tests for frog\'s movement mechanics."""\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_017YWfgyTtHRLZjm3hrfCTBK", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01NsGmmfJfDR5UhmBE4N8kD4", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test standard jumping motion within pond boundaries.\\n \\n Verifies that position is updated correctly according to:\\n new_position = old_position + velocity * time_step\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 68, - line2: 74, - lines_remove: - ' """\n Test standard jumping motion within pond boundaries.\n \n Verifies that position is updated correctly according to:\n new_position = old_position + velocity * time_step\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01NsGmmfJfDR5UhmBE4N8kD4", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01EXrnY36zeat9gdMWAaiPDf", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test position clipping at pond boundaries.\\n \\n Verifies that frog\'s position is properly constrained within pond limits.\\n \\n Args:\\n test_case: Tuple containing test 
parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 86, - line2: 95, - lines_remove: - ' """\n Test position clipping at pond boundaries.\n \n Verifies that frog\'s position is properly constrained within pond limits.\n \n Args:\n test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01EXrnY36zeat9gdMWAaiPDf", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01KqLh5gkGZEMkXzRjdQWcev", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test frog\'s croaking behavior.\\n \\n Verifies that:\\n - Correct number of croaks are produced\\n - Each croak is on a new line\\n - Output format matches expectations\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 97, - line2: 105, - lines_remove: - ' """\n Test frog\'s croaking behavior.\n \n Verifies that:\n - Correct number of croaks are produced\n - Each croak is on a new line\n - Output format matches expectations\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01KqLh5gkGZEMkXzRjdQWcev", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01GtAM91bdJY74XQRBhpqh8K", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Verify the existence and basic structure of AlternativeFrog class.\\n \\n This is a structural test to ensure the alternative implementation\\n maintains the expected interface.\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 115, - line2: 121, - lines_remove: - ' """\n Verify the existence and basic structure of AlternativeFrog class.\n \n This is a structural test to ensure the alternative implementation\n maintains the expected interface.\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01GtAM91bdJY74XQRBhpqh8K", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "toolu_01FrfBzW3RjxL3okQUqRiyWf", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test the standalone jumping function\'s output.\\n \\n Verifies the exact string output of the function, including punctuation\\n and spacing.\\n \\"\\"\\"", "replacement": "", "multiple": false}', - name: "update_textdoc", - }, - 
type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 123, - line2: 129, - lines_remove: - ' """\n Test the standalone jumping function\'s output.\n \n Verifies the exact string output of the function, including punctuation\n and spacing.\n """\n', - lines_add: "\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01FrfBzW3RjxL3okQUqRiyWf", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nNow all docstrings have been removed while preserving the test functionality. Here's a summary of what the test file contains:\n\n1. Fixtures:\n- basic_frog: Creates a standard frog at (5, 5) with velocity (1, 1)\n- pond_dimensions: Provides standard 10x10 pond dimensions\n\n2. Test Classes:\n- TestBoundaryBehavior: Tests boundary interactions with parametrized tests\n- TestMovement: Tests movement mechanics and position updates\n\n3. Standalone Tests:\n- test_frog_initialization: Basic initialization test\n- test_croak_output: Tests croaking functionality\n- test_alternative_frog_existence: Tests AlternativeFrog interface\n- test_standalone_jumping_function: Tests standalone function\n\nThe tests remain well-structured and maintain good coverage of the frog module's functionality, just without the documentation strings. Would you like me to show you the final version of the file or make any additional changes?", - finish_reason: "stop", - }, - { - ftm_role: "user", - ftm_content: "Great! Now remove all comments using regex\n", - checkpoints: [ - { - workspace_folder: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", - commit_hash: "f1234facf0fd15104484f73c8e4cebc929a5d4d5", - }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "I'll use update_textdoc_regex to remove all comments from the test file.", - ftm_tool_calls: [ - { - id: "toolu_01LHPjgzJ8SEuczfm6Av7qfv", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "pattern": "\\\\s*#.*$", "replacement": "", "multiple": true}', - name: "update_textdoc_regex", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01LHPjgzJ8SEuczfm6Av7qfv", - ftm_content: - 'No replacement was performed, `pattern` \n```\n\\s*#.*$\n```\ndid not appear verbatim in "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py". 
Consider checking the file content using `cat()`', - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nLet me check the file content to see the exact format of comments:", - ftm_tool_calls: [ - { - id: "toolu_019iakkKqUjKP73EmEgVhCkZ", - function: { - arguments: - '{"paths": "tests/emergency_frog_situation/test_frog.py", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_019iakkKqUjKP73EmEgVhCkZ", - ftm_content: - "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "test_frog.py", - file_content: - '\n\nimport pytest\nimport numpy as np\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\nimport io\nimport sys\n\n\n@pytest.fixture\ndef basic_frog():\n\n return Frog(5.0, 5.0, 1.0, 1.0)\n\n\n@pytest.fixture\ndef pond_dimensions():\n\n return (10.0, 10.0)\n\n\ndef test_frog_initialization():\n\n x, y = 1.0, 2.0\n vx, vy = 3.0, 4.0\n frog = Frog(x, y, vx, vy)\n \n assert frog.x == x, f"Expected x-position to be {x}"\n assert frog.y == y, f"Expected y-position to be {y}"\n assert frog.vx == vx, f"Expected x-velocity to be {vx}"\n assert frog.vy == vy, f"Expected y-velocity to be {vy}"\n\n\nclass TestBoundaryBehavior:\n\n\n @pytest.mark.parametrize("test_case", [\n # (starting_pos, starting_vel, expected_vel, description)\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n ((11.0, 5.0), (2.0, 0.0), (-2.0, 0.0), "right boundary"),\n ((5.0, -1.0), (0.0, -2.0), (0.0, 2.0), "bottom boundary"),\n ((5.0, 11.0), (0.0, 2.0), (0.0, -2.0), "top boundary")\n ])\n def test_boundary_bouncing(self, test_case, pond_dimensions):\n\n pos, vel, expected, boundary = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.bounce_off_banks(*pond_dimensions)\n \n assert (frog.vx, frog.vy) == expected, \\\n f"Incorrect velocity after bouncing off {boundary}"\n\n def test_no_bounce_within_bounds(self, basic_frog, pond_dimensions):\n\n original_vx, original_vy = basic_frog.vx, basic_frog.vy\n \n basic_frog.bounce_off_banks(*pond_dimensions)\n \n assert (basic_frog.vx, basic_frog.vy) == (original_vx, original_vy), \\\n "Velocity should not change when frog is within bounds"\n\n\nclass TestMovement:\n\n\n def test_normal_jump(self, basic_frog, pond_dimensions):\n\n initial_x, initial_y = basic_frog.x, basic_frog.y\n expected_x = initial_x + basic_frog.vx * DT\n expected_y = initial_y + basic_frog.vy * DT\n \n basic_frog.jump(*pond_dimensions)\n \n assert np.isclose(basic_frog.x, expected_x), \\\n "Incorrect x-position after jump"\n assert np.isclose(basic_frog.y, expected_y), \\\n "Incorrect y-position after jump"\n\n @pytest.mark.parametrize("test_case", [\n # (pos, vel, expected_pos, description)\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n ((0.1, 0.1), (-10.0, -10.0), (0.0, 0.0), "lower bounds")\n ])\n def test_boundary_clipping(self, test_case, pond_dimensions):\n\n pos, vel, expected, bounds = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.jump(*pond_dimensions)\n \n assert (frog.x, frog.y) == expected, \\\n f"Incorrect position clipping at {bounds}"\n\n\ndef test_croak_output():\n\n frog = Frog(0, 0, 0, 0)\n n_croaks = 3\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n try:\n frog.croak(n_croaks)\n output = captured_output.getvalue().strip()\n expected_output = "croak\\n" * n_croaks\n 
finally:\n sys.stdout = sys.__stdout__\n \n assert output == expected_output.strip(), \\\n f"Expected {n_croaks} croaks, got: {output}"\n\n\ndef test_alternative_frog_existence():\n\n alt_frog = AlternativeFrog()\n assert hasattr(alt_frog, \'alternative_jump\'), \\\n "AlternativeFrog missing alternative_jump method"\n alt_frog.alternative_jump() # Should not raise any exceptions\n\n\ndef test_standalone_jumping_function():\n\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n try:\n standalone_jumping_function()\n output = captured_output.getvalue().strip()\n finally:\n sys.stdout = sys.__stdout__\n \n expected = "I\'m a frog! Jump! Jump! Really!"\n assert output == expected, \\\n f"Unexpected output: {output}"\n', - line1: 1, - line2: 134, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "I see the comments now. Let me remove them using regex that matches both inline comments and comment lines:", - ftm_tool_calls: [ - { - id: "toolu_01PXY5nTtbMuiA4JAjN5TMtX", - function: { - arguments: - '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "pattern": "\\\\s*#.*(\\\\n|$)", "replacement": "\\\\n", "multiple": true}', - name: "update_textdoc_regex", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 37, - line2: 40, - lines_remove: - ' @pytest.mark.parametrize("test_case", [\n # (starting_pos, starting_vel, expected_vel, description)\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n', - lines_add: - ' @pytest.mark.parametrize("test_case", [\\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 80, - line2: 83, - lines_remove: - ' @pytest.mark.parametrize("test_case", [\n # (pos, vel, expected_pos, description)\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n', - lines_add: - ' @pytest.mark.parametrize("test_case", [\\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n', - file_name_rename: null, - application_details: "", - }, - { - file_name: - "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", - file_action: "edit", - line1: 119, - line2: 121, - lines_remove: - " alt_frog.alternative_jump() # Should not raise any exceptions\n\n", - lines_add: " alt_frog.alternative_jump()\\n\n", - file_name_rename: null, - application_details: "", - }, - ], - tool_call_id: "toolu_01PXY5nTtbMuiA4JAjN5TMtX", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nGreat! All comments have been removed from the test file. The functionality remains exactly the same, but now without any inline comments or comment lines. 
The test file is now cleaner and more concise.\n\nWould you like me to show you the final version of the file or make any other changes?", - finish_reason: "stop", - }, - ], - title: "Frog Tests Generation", - model: "", - last_user_message_id: "e6304800-f080-425c-b1e3-c1f2ce267792", - tool_use: "agent", - mode: "AGENT", - read: true, - isTitleGenerated: true, - createdAt: "2025-02-14T06:47:26.640Z", - updatedAt: "2025-02-14T06:47:26.640Z", -}; +export const CHAT_WITH_TEXTDOC: BaseMessage[] = [ + { + ftm_role: "system", + ftm_content: + "[mode3] You are Refact Agent, an autonomous bot for coding tasks.\n\nCore Principles\n1. Use knowledge()\n - Always use knowledge() first when you encounter an agentic (complex) task.\n - This tool can access external data, including successful “trajectories” (examples of past solutions).\n - External database records begin with the icon “🗃️” followed by a record identifier.\n - Use these records to help solve your tasks by analogy.\n2. Use locate() with the Full Problem Statement\n - Provide the entire user request in the problem_statement argument to avoid losing any details (“telephone game” effect).\n - Include user’s emotional stance, code snippets, formatting, instructions—everything word-for-word.\n - Only omit parts of the user’s request if they are unrelated to the final solution.\n - Avoid using locate() if the problem is quite simple and can be solved without extensive project analysis.\n\nAnswering Strategy\n1. If the user’s question is unrelated to the project\n - Answer directly without using any special calls.\n2. If the user’s question is related to the project\n - First, call knowledge() for relevant information and best practices.\n3. Making Changes\n - If a solution requires file changes, use `*_textdoc()` tools.\n - It's a good practice to call cat() to track changes for changed files.\n\nImportant Notes\n1. Parallel Exploration\n - When you explore different ideas, use multiple parallel methods.\n2. Project-Related Questions\n - For any project question, always call knowledge() before taking any action.\n\nWhen running on user's laptop, you most likely have the shell() tool. It's for one-time dependency installations, or doing whatever\nuser is asking you to do. Tools the user can set up are better, because they don't require confimations when running on a laptop.\nWhen doing something typical for the project, offer the user to make a cmdline_* tool after you have run it.\nYou can do this by writing:\n\n🧩SETTINGS:cmdline_cargo_check\n\nfrom a new line, that will open (when clicked) a wizard that creates `cargo check` (in this example) command line tool.\n\nIn a similar way, service_* tools work. The difference is cmdline_* is designed for non-interactive blocking commands that immediately\nreturn text in stdout/stderr, and service_* is designed for blocking background commands, such as hypercorn server that runs forever until you hit Ctrl+C.\nHere is another example:\n\n🧩SETTINGS:service_hypercorn\n\n\nYou might receive additional instructions that start with 💿. Those are not coming from the user, they are programmed to help you operate\nwell and they are always in English. 
Answer in the language the user has asked the question.\n\n\n- below general information about the current project -\n\nThe current IDE workspace has these project directories:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation\n\nThere is no active file currently open in the IDE.\nThe project is under git version control, located at:\n/Users/marc/Projects/refact-lsp\n\n\n", + }, + { + ftm_role: "user", + ftm_content: "Create tests for frog\n", + checkpoints: [ + { + workspace_folder: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", + commit_hash: "ae0970ff3eca36d1867466847ac876d747357668", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "I'll help create tests for the frog-related code. Let me first understand the project structure and content.", + ftm_tool_calls: [ + { + id: "toolu_01XVhkyaDunsy4fPrDqy3toa", + function: { + arguments: + '{"goal": "Create tests for frog-related code", "language_slash_framework": "rust/tokio", "im_going_to_use_tools": "cat, tree", "im_going_to_apply_to": "emergency_frog_situation, tests"}', + name: "knowledge", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01XVhkyaDunsy4fPrDqy3toa", + ftm_content: + "🗃️e19af1e7b3\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n\n🗃️d84f5c4a7c\nAdditional instructions for django web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. 
Investigate project to understand the task given by the user, start with calling tree() and locate(), looking into relevant files using cat(). If you see reference designs and sketches, read them using cat()\n2. Start django server\n3. Navigate to the place on the website that user wants to change, make a screenshot to make sure you understand what exactly needs to change\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it.\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place.\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE YOU HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n🗃️ae3f1228bd\n[\n[\"goal\", \"Rename all occurrences of 'frog' to 'bird' in the project\"],\n[\"tree(use_ast=true)\", \"Found emergency_frog_situation/ with index.html, holiday.py, work_day.py, game.js, jump_to_conclusions.py, bird.py, set_as_avatar.py\"],\n[\"search(query='frog', scope='workspace')\", \"Found frog references in work_day.py (imports, function), jump_to_conclusions.py (imports, class usage), bird.py already has Bird class\"],\n[\"thinking\", \"bird.py already has Bird class and set_as_avatar.py uses it, so we need to update work_day.py and jump_to_conclusions.py to use the existing Bird class\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 001 'work_day.py' changed import frog->bird, bring_your_own_frog->bring_your_own_bird, frog.Frog->bird.Bird\"],\n[\"patch(tickets='001', path='tests/emergency_frog_situation/work_day.py')\", \"3 chunks applied: import change, function rename, type annotation update\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 002 'jump_to_conclusions.py' changed import frog->bird, draw_hello_frog->draw_hello_bird, all frog.Frog->bird.Bird\"],\n[\"patch(tickets='002', path='tests/emergency_frog_situation/jump_to_conclusions.py')\", \"5 chunks applied: import, function rename, constructor call, type annotation, function call\"],\n[\"outcome\", \"SUCCESS\"]\n]\n\n🗃️2b684b6e70\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. 
Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nLet me check the project structure to understand what we're working with.", + ftm_tool_calls: [ + { + id: "toolu_01HMyLgKsLQURM9vgd3vQKXN", + function: { + arguments: '{"use_ast": true}', + name: "tree", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01HMyLgKsLQURM9vgd3vQKXN", + ftm_content: + "/\n home/\n svakhreev/\n projects/\n refact-lsp/\n tests/\n emergency_frog_situation/\n holiday.py\n work_day.py\n __pycache__/\n frog.cpython-310.pyc\n frog.py\n jump_to_conclusions.py\n set_as_avatar.py", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nLet me examine the frog-related files to understand what we need to test.", + ftm_tool_calls: [ + { + id: "toolu_0136dUTkih5ES8rrzHa5B5ep", + function: { + arguments: + '{"paths": "tests/emergency_frog_situation/frog.py", "skeleton": false}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_0136dUTkih5ES8rrzHa5B5ep", + ftm_content: + "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "frog.py", + file_content: + 'import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n def croak(self, n_times):\n for n in range(n_times):\n print("croak")\n\n\nclass AlternativeFrog:\n def alternative_jump(self):\n pass\n\n\ndef standalone_jumping_function():\n print("I\'m a frog! Jump! Jump! Really!")\n', + line1: 1, + line2: 39, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "I'll create tests for the Frog class and its related functionality. We'll need to test the initialization, bouncing behavior, jumping mechanics, and the croak method. 
Let me create a test file using pytest.", + ftm_tool_calls: [ + { + id: "toolu_015isrQXYKSQ4XYqECkXis3J", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "content": "import pytest\\nimport numpy as np\\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\\nfrom unittest.mock import patch\\nimport io\\nimport sys\\n\\ndef test_frog_initialization():\\n frog = Frog(1.0, 2.0, 3.0, 4.0)\\n assert frog.x == 1.0\\n assert frog.y == 2.0\\n assert frog.vx == 3.0\\n assert frog.vy == 4.0\\n\\nclass TestFrogBouncing:\\n def test_bounce_off_left_bank(self):\\n frog = Frog(-1.0, 5.0, -2.0, 0.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vx > 0 # Should reverse horizontal velocity\\n\\n def test_bounce_off_right_bank(self):\\n frog = Frog(11.0, 5.0, 2.0, 0.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vx < 0 # Should reverse horizontal velocity\\n\\n def test_bounce_off_bottom(self):\\n frog = Frog(5.0, -1.0, 0.0, -2.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vy > 0 # Should reverse vertical velocity\\n\\n def test_bounce_off_top(self):\\n frog = Frog(5.0, 11.0, 0.0, 2.0)\\n frog.bounce_off_banks(10, 10)\\n assert frog.vy < 0 # Should reverse vertical velocity\\n\\n def test_no_bounce_within_bounds(self):\\n frog = Frog(5.0, 5.0, 1.0, 1.0)\\n original_vx, original_vy = frog.vx, frog.vy\\n frog.bounce_off_banks(10, 10)\\n assert frog.vx == original_vx\\n assert frog.vy == original_vy\\n\\nclass TestFrogJumping:\\n def test_normal_jump(self):\\n frog = Frog(5.0, 5.0, 1.0, 1.0)\\n expected_x = 5.0 + 1.0 * DT\\n expected_y = 5.0 + 1.0 * DT\\n frog.jump(10, 10)\\n assert np.isclose(frog.x, expected_x)\\n assert np.isclose(frog.y, expected_y)\\n\\n def test_jump_with_boundary_clipping(self):\\n frog = Frog(9.9, 9.9, 10.0, 10.0)\\n frog.jump(10, 10)\\n assert frog.x == 10.0 # Should be clipped to pond width\\n assert frog.y == 10.0 # Should be clipped to pond height\\n\\n def test_jump_with_negative_boundary_clipping(self):\\n frog = Frog(0.1, 0.1, -10.0, -10.0)\\n frog.jump(10, 10)\\n assert frog.x == 0.0 # Should be clipped to minimum\\n assert frog.y == 0.0 # Should be clipped to minimum\\n\\ndef test_croak():\\n frog = Frog(0, 0, 0, 0)\\n # Capture stdout\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n frog.croak(3)\\n \\n # Restore stdout\\n sys.stdout = sys.__stdout__\\n \\n output = captured_output.getvalue().strip()\\n expected_output = \\"croak\\\\ncroak\\\\ncroak\\"\\n assert output == expected_output\\n\\ndef test_alternative_frog():\\n alt_frog = AlternativeFrog()\\n # Just verify it can be instantiated and method exists\\n alt_frog.alternative_jump()\\n\\ndef test_standalone_jumping_function():\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n standalone_jumping_function()\\n \\n sys.stdout = sys.__stdout__\\n \\n output = captured_output.getvalue().strip()\\n assert output == \\"I\'m a frog! Jump! Jump! 
Really!\\""}', + name: "create_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + 'import pytest\nimport numpy as np\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\nfrom unittest.mock import patch\nimport io\nimport sys\n\ndef test_frog_initialization():\n frog = Frog(1.0, 2.0, 3.0, 4.0)\n assert frog.x == 1.0\n assert frog.y == 2.0\n assert frog.vx == 3.0\n assert frog.vy == 4.0\n\nclass TestFrogBouncing:\n def test_bounce_off_left_bank(self):\n frog = Frog(-1.0, 5.0, -2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx > 0 # Should reverse horizontal velocity\n\n def test_bounce_off_right_bank(self):\n frog = Frog(11.0, 5.0, 2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx < 0 # Should reverse horizontal velocity\n\n def test_bounce_off_bottom(self):\n frog = Frog(5.0, -1.0, 0.0, -2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy > 0 # Should reverse vertical velocity\n\n def test_bounce_off_top(self):\n frog = Frog(5.0, 11.0, 0.0, 2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy < 0 # Should reverse vertical velocity\n\n def test_no_bounce_within_bounds(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n original_vx, original_vy = frog.vx, frog.vy\n frog.bounce_off_banks(10, 10)\n assert frog.vx == original_vx\n assert frog.vy == original_vy\n\nclass TestFrogJumping:\n def test_normal_jump(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n expected_x = 5.0 + 1.0 * DT\n expected_y = 5.0 + 1.0 * DT\n frog.jump(10, 10)\n assert np.isclose(frog.x, expected_x)\n assert np.isclose(frog.y, expected_y)\n\n def test_jump_with_boundary_clipping(self):\n frog = Frog(9.9, 9.9, 10.0, 10.0)\n frog.jump(10, 10)\n assert frog.x == 10.0 # Should be clipped to pond width\n assert frog.y == 10.0 # Should be clipped to pond height\n\n def test_jump_with_negative_boundary_clipping(self):\n frog = Frog(0.1, 0.1, -10.0, -10.0)\n frog.jump(10, 10)\n assert frog.x == 0.0 # Should be clipped to minimum\n assert frog.y == 0.0 # Should be clipped to minimum\n\ndef test_croak():\n frog = Frog(0, 0, 0, 0)\n # Capture stdout\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n frog.croak(3)\n \n # Restore stdout\n sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n expected_output = "croak\\ncroak\\ncroak"\n assert output == expected_output\n\ndef test_alternative_frog():\n alt_frog = AlternativeFrog()\n # Just verify it can be instantiated and method exists\n alt_frog.alternative_jump()\n\ndef test_standalone_jumping_function():\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n standalone_jumping_function()\n \n sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n assert output == "I\'m a frog! Jump! Jump! Really!"\n', + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_015isrQXYKSQ4XYqECkXis3J", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nI've created a comprehensive test suite for the frog module. Here's what the tests cover:\n\n1. Basic Initialization\n- Tests that a Frog object is correctly initialized with given coordinates and velocities\n\n2. 
Bouncing Behavior (TestFrogBouncing class)\n- Tests bouncing off all four boundaries (left, right, top, bottom)\n- Tests that velocity is reversed appropriately when hitting boundaries\n- Tests that no bouncing occurs when the frog is within bounds\n\n3. Jumping Mechanics (TestFrogJumping class)\n- Tests normal jumping behavior with velocity and time step (DT)\n- Tests boundary clipping when jumping would exceed pond dimensions\n- Tests clipping at both maximum and minimum boundaries\n\n4. Croaking\n- Tests that the croak method outputs the correct number of \"croak\" messages\n\n5. Additional Components\n- Tests for AlternativeFrog class (basic instantiation and method existence)\n- Tests for the standalone_jumping_function\n\nThe tests use pytest and include proper handling of stdout capture for testing print statements. They also use numpy's isclose() for floating-point comparisons where appropriate.\n\nTo run these tests, you would need to have pytest installed and can run them using:\n```bash\npytest test_frog.py -v\n```\n\nThe tests are designed to be thorough while remaining readable and maintainable. Each test has a clear purpose and includes comments where necessary to explain the expected behavior.\n\nWould you like me to add any additional test cases or modify the existing ones?", + finish_reason: "stop", + }, + { + ftm_role: "user", + ftm_content: "Replace generated tests with well documented tests\n", + checkpoints: [ + { + workspace_folder: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", + commit_hash: "b911227ed17022f6d828cb54a130f08c50273484", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "I'll create a new version of the tests with comprehensive documentation and clear test organization.", + ftm_tool_calls: [ + { + id: "toolu_019L5hGRf36DCj9q4BQfofBV", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "replacement": "\\"\\"\\"\\nTests for the Frog simulation module.\\n\\nThis test suite verifies the behavior of a frog in a bounded pond environment.\\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\\n\\nKey components tested:\\n- Frog initialization and state\\n- Boundary detection and bouncing behavior\\n- Movement and position updates\\n- Sound production (croaking)\\n\\"\\"\\"\\n\\nimport pytest\\nimport numpy as np\\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\\nimport io\\nimport sys\\n\\n\\n@pytest.fixture\\ndef basic_frog():\\n \\"\\"\\"\\n Provides a basic frog instance for testing.\\n \\n Returns:\\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\\n \\"\\"\\"\\n return Frog(5.0, 5.0, 1.0, 1.0)\\n\\n\\n@pytest.fixture\\ndef pond_dimensions():\\n \\"\\"\\"\\n Provides standard pond dimensions for testing.\\n \\n Returns:\\n tuple: (width, height) of the pond, both set to 10 units\\n \\"\\"\\"\\n return (10.0, 10.0)\\n\\n\\ndef test_frog_initialization():\\n \\"\\"\\"\\n Verify that a frog is correctly initialized with given parameters.\\n \\n Tests:\\n - Position coordinates (x, y)\\n - Velocity components (vx, vy)\\n \\"\\"\\"\\n x, y = 1.0, 2.0\\n vx, vy = 3.0, 4.0\\n frog = Frog(x, y, vx, vy)\\n \\n assert frog.x == x, f\\"Expected x-position to be {x}\\"\\n assert frog.y == y, f\\"Expected y-position to be {y}\\"\\n assert frog.vx == vx, f\\"Expected x-velocity to be {vx}\\"\\n assert frog.vy == vy, f\\"Expected y-velocity to be {vy}\\"\\n\\n\\nclass TestBoundaryBehavior:\\n 
\\"\\"\\"Tests for frog\'s interaction with pond boundaries.\\"\\"\\"\\n\\n @pytest.mark.parametrize(\\"test_case\\", [\\n # (starting_pos, starting_vel, expected_vel, description)\\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), \\"left boundary\\"),\\n ((11.0, 5.0), (2.0, 0.0), (-2.0, 0.0), \\"right boundary\\"),\\n ((5.0, -1.0), (0.0, -2.0), (0.0, 2.0), \\"bottom boundary\\"),\\n ((5.0, 11.0), (0.0, 2.0), (0.0, -2.0), \\"top boundary\\")\\n ])\\n def test_boundary_bouncing(self, test_case, pond_dimensions):\\n \\"\\"\\"\\n Test bouncing behavior at all pond boundaries.\\n \\n Verifies that velocity is correctly reversed when the frog hits a boundary.\\n \\n Args:\\n test_case: Tuple containing test parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"\\n pos, vel, expected, boundary = test_case\\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\\n \\n frog.bounce_off_banks(*pond_dimensions)\\n \\n assert (frog.vx, frog.vy) == expected, \\\\\\n f\\"Incorrect velocity after bouncing off {boundary}\\"\\n\\n def test_no_bounce_within_bounds(self, basic_frog, pond_dimensions):\\n \\"\\"\\"\\n Verify that no velocity changes occur when frog is within pond boundaries.\\n \\"\\"\\"\\n original_vx, original_vy = basic_frog.vx, basic_frog.vy\\n \\n basic_frog.bounce_off_banks(*pond_dimensions)\\n \\n assert (basic_frog.vx, basic_frog.vy) == (original_vx, original_vy), \\\\\\n \\"Velocity should not change when frog is within bounds\\"\\n\\n\\nclass TestMovement:\\n \\"\\"\\"Tests for frog\'s movement mechanics.\\"\\"\\"\\n\\n def test_normal_jump(self, basic_frog, pond_dimensions):\\n \\"\\"\\"\\n Test standard jumping motion within pond boundaries.\\n \\n Verifies that position is updated correctly according to:\\n new_position = old_position + velocity * time_step\\n \\"\\"\\"\\n initial_x, initial_y = basic_frog.x, basic_frog.y\\n expected_x = initial_x + basic_frog.vx * DT\\n expected_y = initial_y + basic_frog.vy * DT\\n \\n basic_frog.jump(*pond_dimensions)\\n \\n assert np.isclose(basic_frog.x, expected_x), \\\\\\n \\"Incorrect x-position after jump\\"\\n assert np.isclose(basic_frog.y, expected_y), \\\\\\n \\"Incorrect y-position after jump\\"\\n\\n @pytest.mark.parametrize(\\"test_case\\", [\\n # (pos, vel, expected_pos, description)\\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), \\"upper bounds\\"),\\n ((0.1, 0.1), (-10.0, -10.0), (0.0, 0.0), \\"lower bounds\\")\\n ])\\n def test_boundary_clipping(self, test_case, pond_dimensions):\\n \\"\\"\\"\\n Test position clipping at pond boundaries.\\n \\n Verifies that frog\'s position is properly constrained within pond limits.\\n \\n Args:\\n test_case: Tuple containing test parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"\\n pos, vel, expected, bounds = test_case\\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\\n \\n frog.jump(*pond_dimensions)\\n \\n assert (frog.x, frog.y) == expected, \\\\\\n f\\"Incorrect position clipping at {bounds}\\"\\n\\n\\ndef test_croak_output():\\n \\"\\"\\"\\n Test frog\'s croaking behavior.\\n \\n Verifies that:\\n - Correct number of croaks are produced\\n - Each croak is on a new line\\n - Output format matches expectations\\n \\"\\"\\"\\n frog = Frog(0, 0, 0, 0)\\n n_croaks = 3\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n try:\\n frog.croak(n_croaks)\\n output = captured_output.getvalue().strip()\\n expected_output = \\"croak\\\\n\\" * n_croaks\\n finally:\\n sys.stdout = sys.__stdout__\\n \\n assert output == expected_output.strip(), 
\\\\\\n f\\"Expected {n_croaks} croaks, got: {output}\\"\\n\\n\\ndef test_alternative_frog_existence():\\n \\"\\"\\"\\n Verify the existence and basic structure of AlternativeFrog class.\\n \\n This is a structural test to ensure the alternative implementation\\n maintains the expected interface.\\n \\"\\"\\"\\n alt_frog = AlternativeFrog()\\n assert hasattr(alt_frog, \'alternative_jump\'), \\\\\\n \\"AlternativeFrog missing alternative_jump method\\"\\n alt_frog.alternative_jump() # Should not raise any exceptions\\n\\n\\ndef test_standalone_jumping_function():\\n \\"\\"\\"\\n Test the standalone jumping function\'s output.\\n \\n Verifies the exact string output of the function, including punctuation\\n and spacing.\\n \\"\\"\\"\\n captured_output = io.StringIO()\\n sys.stdout = captured_output\\n \\n try:\\n standalone_jumping_function()\\n output = captured_output.getvalue().strip()\\n finally:\\n sys.stdout = sys.__stdout__\\n \\n expected = \\"I\'m a frog! Jump! Jump! Really!\\"\\n assert output == expected, \\\\\\n f\\"Unexpected output: {output}\\""}', + name: "replace_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + '"""\nTests for the Frog simulation module.\n\nThis test suite verifies the behavior of a frog in a bounded pond environment.\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\n\nKey components tested:\n- Frog initialization and state\n- Boundary detection and bouncing behavior\n- Movement and position updates\n- Sound production (croaking)\n"""\n\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 4, + line2: 5, + lines_remove: "from unittest.mock import patch\n", + lines_add: "", + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 8, + line2: 8, + lines_remove: "", + lines_add: + '\n@pytest.fixture\ndef basic_frog():\n """\n Provides a basic frog instance for testing.\n \n Returns:\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\n """\n return Frog(5.0, 5.0, 1.0, 1.0)\n\n\n@pytest.fixture\ndef pond_dimensions():\n """\n Provides standard pond dimensions for testing.\n \n Returns:\n tuple: (width, height) of the pond, both set to 10 units\n """\n return (10.0, 10.0)\n\n\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 9, + line2: 14, + lines_remove: + " frog = Frog(1.0, 2.0, 3.0, 4.0)\n assert frog.x == 1.0\n assert frog.y == 2.0\n assert frog.vx == 3.0\n assert frog.vy == 4.0\n", + lines_add: + ' """\n Verify that a frog is correctly initialized with given parameters.\n \n Tests:\n - Position coordinates (x, y)\n - Velocity components (vx, vy)\n """\n x, y = 1.0, 2.0\n vx, vy = 3.0, 4.0\n frog = Frog(x, y, vx, vy)\n \n assert frog.x == x, f"Expected x-position to be {x}"\n assert frog.y == y, f"Expected y-position to be {y}"\n assert frog.vx == vx, f"Expected x-velocity to be {vx}"\n assert frog.vy == vy, f"Expected y-velocity to be {vy}"\n', + 
file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 15, + line2: 20, + lines_remove: + "class TestFrogBouncing:\n def test_bounce_off_left_bank(self):\n frog = Frog(-1.0, 5.0, -2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx > 0 # Should reverse horizontal velocity\n", + lines_add: "", + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 21, + line2: 25, + lines_remove: + " def test_bounce_off_right_bank(self):\n frog = Frog(11.0, 5.0, 2.0, 0.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vx < 0 # Should reverse horizontal velocity\n", + lines_add: + 'class TestBoundaryBehavior:\n """Tests for frog\'s interaction with pond boundaries."""\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 26, + line2: 30, + lines_remove: + " def test_bounce_off_bottom(self):\n frog = Frog(5.0, -1.0, 0.0, -2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy > 0 # Should reverse vertical velocity\n", + lines_add: + ' @pytest.mark.parametrize("test_case", [\n # (starting_pos, starting_vel, expected_vel, description)\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n ((11.0, 5.0), (2.0, 0.0), (-2.0, 0.0), "right boundary"),\n ((5.0, -1.0), (0.0, -2.0), (0.0, 2.0), "bottom boundary"),\n ((5.0, 11.0), (0.0, 2.0), (0.0, -2.0), "top boundary")\n ])\n def test_boundary_bouncing(self, test_case, pond_dimensions):\n """\n Test bouncing behavior at all pond boundaries.\n \n Verifies that velocity is correctly reversed when the frog hits a boundary.\n \n Args:\n test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n pos, vel, expected, boundary = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.bounce_off_banks(*pond_dimensions)\n \n assert (frog.vx, frog.vy) == expected, \\\n f"Incorrect velocity after bouncing off {boundary}"\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 31, + line2: 35, + lines_remove: + " def test_bounce_off_top(self):\n frog = Frog(5.0, 11.0, 0.0, 2.0)\n frog.bounce_off_banks(10, 10)\n assert frog.vy < 0 # Should reverse vertical velocity\n", + lines_add: + ' def test_no_bounce_within_bounds(self, basic_frog, pond_dimensions):\n """\n Verify that no velocity changes occur when frog is within pond boundaries.\n """\n original_vx, original_vy = basic_frog.vx, basic_frog.vy\n \n basic_frog.bounce_off_banks(*pond_dimensions)\n \n assert (basic_frog.vx, basic_frog.vy) == (original_vx, original_vy), \\\n "Velocity should not change when frog is within bounds"\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 36, + line2: 42, + lines_remove: + " def test_no_bounce_within_bounds(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n original_vx, original_vy = frog.vx, frog.vy\n frog.bounce_off_banks(10, 10)\n assert frog.vx == original_vx\n assert frog.vy == original_vy\n", + lines_add: "", + file_name_rename: null, + application_details: 
"", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 43, + line2: 51, + lines_remove: + "class TestFrogJumping:\n def test_normal_jump(self):\n frog = Frog(5.0, 5.0, 1.0, 1.0)\n expected_x = 5.0 + 1.0 * DT\n expected_y = 5.0 + 1.0 * DT\n frog.jump(10, 10)\n assert np.isclose(frog.x, expected_x)\n assert np.isclose(frog.y, expected_y)\n", + lines_add: + 'class TestMovement:\n """Tests for frog\'s movement mechanics."""\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 52, + line2: 57, + lines_remove: + " def test_jump_with_boundary_clipping(self):\n frog = Frog(9.9, 9.9, 10.0, 10.0)\n frog.jump(10, 10)\n assert frog.x == 10.0 # Should be clipped to pond width\n assert frog.y == 10.0 # Should be clipped to pond height\n", + lines_add: + ' def test_normal_jump(self, basic_frog, pond_dimensions):\n """\n Test standard jumping motion within pond boundaries.\n \n Verifies that position is updated correctly according to:\n new_position = old_position + velocity * time_step\n """\n initial_x, initial_y = basic_frog.x, basic_frog.y\n expected_x = initial_x + basic_frog.vx * DT\n expected_y = initial_y + basic_frog.vy * DT\n \n basic_frog.jump(*pond_dimensions)\n \n assert np.isclose(basic_frog.x, expected_x), \\\n "Incorrect x-position after jump"\n assert np.isclose(basic_frog.y, expected_y), \\\n "Incorrect y-position after jump"\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 58, + line2: 63, + lines_remove: + " def test_jump_with_negative_boundary_clipping(self):\n frog = Frog(0.1, 0.1, -10.0, -10.0)\n frog.jump(10, 10)\n assert frog.x == 0.0 # Should be clipped to minimum\n assert frog.y == 0.0 # Should be clipped to minimum\n", + lines_add: + ' @pytest.mark.parametrize("test_case", [\n # (pos, vel, expected_pos, description)\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n ((0.1, 0.1), (-10.0, -10.0), (0.0, 0.0), "lower bounds")\n ])\n def test_boundary_clipping(self, test_case, pond_dimensions):\n """\n Test position clipping at pond boundaries.\n \n Verifies that frog\'s position is properly constrained within pond limits.\n \n Args:\n test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n pos, vel, expected, bounds = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.jump(*pond_dimensions)\n \n assert (frog.x, frog.y) == expected, \\\n f"Incorrect position clipping at {bounds}"\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 64, + line2: 65, + lines_remove: "def test_croak():\n", + lines_add: + '\ndef test_croak_output():\n """\n Test frog\'s croaking behavior.\n \n Verifies that:\n - Correct number of croaks are produced\n - Each croak is on a new line\n - Output format matches expectations\n """\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 66, + line2: 67, + lines_remove: " # Capture stdout\n", + lines_add: " n_croaks = 3\n", + file_name_rename: null, + 
application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 70, + line2: 71, + lines_remove: " frog.croak(3)\n", + lines_add: + ' try:\n frog.croak(n_croaks)\n output = captured_output.getvalue().strip()\n expected_output = "croak\\n" * n_croaks\n finally:\n sys.stdout = sys.__stdout__\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 72, + line2: 78, + lines_remove: + ' # Restore stdout\n sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n expected_output = "croak\\ncroak\\ncroak"\n assert output == expected_output\n', + lines_add: + ' assert output == expected_output.strip(), \\\n f"Expected {n_croaks} croaks, got: {output}"\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 79, + line2: 80, + lines_remove: "def test_alternative_frog():\n", + lines_add: + '\ndef test_alternative_frog_existence():\n """\n Verify the existence and basic structure of AlternativeFrog class.\n \n This is a structural test to ensure the alternative implementation\n maintains the expected interface.\n """\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 81, + line2: 83, + lines_remove: + " # Just verify it can be instantiated and method exists\n alt_frog.alternative_jump()\n", + lines_add: + " assert hasattr(alt_frog, 'alternative_jump'), \\\n \"AlternativeFrog missing alternative_jump method\"\n alt_frog.alternative_jump() # Should not raise any exceptions\n", + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 84, + line2: 84, + lines_remove: "", + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 85, + line2: 85, + lines_remove: "", + lines_add: + ' """\n Test the standalone jumping function\'s output.\n \n Verifies the exact string output of the function, including punctuation\n and spacing.\n """\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 88, + line2: 89, + lines_remove: " standalone_jumping_function()\n", + lines_add: + " try:\n standalone_jumping_function()\n output = captured_output.getvalue().strip()\n finally:\n sys.stdout = sys.__stdout__\n", + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 90, + line2: 94, + lines_remove: + ' sys.stdout = sys.__stdout__\n \n output = captured_output.getvalue().strip()\n assert output == "I\'m a frog! Jump! Jump! Really!"\n', + lines_add: + ' expected = "I\'m a frog! Jump! Jump! 
Really!"\n assert output == expected, \\\n f"Unexpected output: {output}"\n', + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_019L5hGRf36DCj9q4BQfofBV", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nI've completely rewritten the tests with comprehensive documentation and better organization. Here are the key improvements:\n\n1. Added Module-Level Documentation\n- Clear description of what the test suite covers\n- Key components being tested\n- Overall structure\n\n2. Added Fixtures\n- `basic_frog`: Provides a standard frog instance for testing\n- `pond_dimensions`: Provides standard pond dimensions\n- Both fixtures are well-documented with docstrings\n\n3. Organized Tests into Logical Classes\n- `TestBoundaryBehavior`: Tests for boundary interactions\n- `TestMovement`: Tests for movement mechanics\n\n4. Improved Test Documentation\n- Each test function has a detailed docstring\n- Clear description of what is being tested\n- Expected behavior and conditions\n- Any special considerations\n\n5. Better Test Structure\n- Used parametrized tests where appropriate to reduce code duplication\n- Clear separation of setup, action, and assertion phases\n- Meaningful assertion messages\n\n6. Improved Error Handling\n- Proper cleanup of system resources (stdout capture)\n- Better error messages with context\n- Use of try/finally blocks where appropriate\n\n7. Better Test Names\n- More descriptive test names that indicate what's being tested\n- Consistent naming convention\n\nKey Features:\n1. Boundary Testing\n- Comprehensive tests for all pond boundaries\n- Tests for both position and velocity changes\n- Clear error messages for boundary violations\n\n2. Movement Testing\n- Precise testing of movement calculations\n- Boundary clipping verification\n- Multiple test cases for different scenarios\n\n3. Output Testing\n- Proper capture and verification of stdout\n- Exact string matching for croaks and messages\n- Resource cleanup after stdout capture\n\n4. Structure Testing\n- Verification of alternative implementations\n- Interface consistency checks\n- Existence of required methods\n\nThe tests are now more maintainable, readable, and provide better feedback when failures occur. Each test has a clear purpose and includes proper documentation about what it's testing and why.\n\nWould you like me to explain any particular part of the test suite in more detail?", + finish_reason: "stop", + }, + { + ftm_role: "user", + ftm_content: + "Great! 
Update the document and remove all docstrings one by one", + checkpoints: [ + { + workspace_folder: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", + commit_hash: "0609d3823dd9e8ac84674cefb5dcf5d0b691e890", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "I'll help you remove the docstrings one by one from the test file while preserving the functionality.", + ftm_tool_calls: [ + { + id: "toolu_01KmJrYK6xn2XqGLXpFA5Tgp", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": "\\"\\"\\"\\nTests for the Frog simulation module.\\n\\nThis test suite verifies the behavior of a frog in a bounded pond environment.\\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\\n\\nKey components tested:\\n- Frog initialization and state\\n- Boundary detection and bouncing behavior\\n- Movement and position updates\\n- Sound production (croaking)\\n\\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 1, + line2: 3, + lines_remove: '"""\nTests for the Frog simulation module.\n', + lines_add: "", + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 4, + line2: 13, + lines_remove: + 'This test suite verifies the behavior of a frog in a bounded pond environment.\nThe tests cover movement mechanics, boundary interactions, and various frog actions.\n\nKey components tested:\n- Frog initialization and state\n- Boundary detection and bouncing behavior\n- Movement and position updates\n- Sound production (croaking)\n"""\n', + lines_add: "", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01KmJrYK6xn2XqGLXpFA5Tgp", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01Smt5a61QbmbmZHpbNh7BKK", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Provides a basic frog instance for testing.\\n \\n Returns:\\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 12, + line2: 18, + lines_remove: + ' """\n Provides a basic frog instance for testing.\n \n Returns:\n Frog: A frog instance initialized at (5, 5) with velocity (1, 1)\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01Smt5a61QbmbmZHpbNh7BKK", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_0168Gio2kBMYhzKe6bdxErkb", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Provides standard pond dimensions for testing.\\n \\n Returns:\\n tuple: (width, height) of the pond, both set to 10 units\\n \\"\\"\\"", "replacement": "", 
"multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 18, + line2: 24, + lines_remove: + ' """\n Provides standard pond dimensions for testing.\n \n Returns:\n tuple: (width, height) of the pond, both set to 10 units\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_0168Gio2kBMYhzKe6bdxErkb", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_016ChnUcGQGrjfbx2dSzmmeR", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Verify that a frog is correctly initialized with given parameters.\\n \\n Tests:\\n - Position coordinates (x, y)\\n - Velocity components (vx, vy)\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 23, + line2: 30, + lines_remove: + ' """\n Verify that a frog is correctly initialized with given parameters.\n \n Tests:\n - Position coordinates (x, y)\n - Velocity components (vx, vy)\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_016ChnUcGQGrjfbx2dSzmmeR", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01Fzi693dL7aKwE9gLFetqiN", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"Tests for frog\'s interaction with pond boundaries.\\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 35, + line2: 36, + lines_remove: + ' """Tests for frog\'s interaction with pond boundaries."""\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01Fzi693dL7aKwE9gLFetqiN", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01Xtt2L5ctWViSFce6JcNR2Z", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test bouncing behavior at all pond boundaries.\\n \\n Verifies that velocity is correctly reversed when the frog hits a boundary.\\n \\n Args:\\n test_case: Tuple containing test parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 45, + line2: 54, + lines_remove: + ' """\n Test bouncing behavior at all pond boundaries.\n \n Verifies that velocity is correctly reversed when the frog hits a boundary.\n \n Args:\n 
test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01Xtt2L5ctWViSFce6JcNR2Z", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01WoEFVoJi9wSp2XgTx5PCUo", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Verify that no velocity changes occur when frog is within pond boundaries.\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 55, + line2: 58, + lines_remove: + ' """\n Verify that no velocity changes occur when frog is within pond boundaries.\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01WoEFVoJi9wSp2XgTx5PCUo", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_017YWfgyTtHRLZjm3hrfCTBK", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"Tests for frog\'s movement mechanics.\\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 65, + line2: 66, + lines_remove: ' """Tests for frog\'s movement mechanics."""\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_017YWfgyTtHRLZjm3hrfCTBK", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01NsGmmfJfDR5UhmBE4N8kD4", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test standard jumping motion within pond boundaries.\\n \\n Verifies that position is updated correctly according to:\\n new_position = old_position + velocity * time_step\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 68, + line2: 74, + lines_remove: + ' """\n Test standard jumping motion within pond boundaries.\n \n Verifies that position is updated correctly according to:\n new_position = old_position + velocity * time_step\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01NsGmmfJfDR5UhmBE4N8kD4", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01EXrnY36zeat9gdMWAaiPDf", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test position clipping at pond boundaries.\\n \\n Verifies that frog\'s position is properly constrained within pond limits.\\n \\n Args:\\n test_case: Tuple containing test 
parameters\\n pond_dimensions: Standard pond dimensions from fixture\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 86, + line2: 95, + lines_remove: + ' """\n Test position clipping at pond boundaries.\n \n Verifies that frog\'s position is properly constrained within pond limits.\n \n Args:\n test_case: Tuple containing test parameters\n pond_dimensions: Standard pond dimensions from fixture\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01EXrnY36zeat9gdMWAaiPDf", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01KqLh5gkGZEMkXzRjdQWcev", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test frog\'s croaking behavior.\\n \\n Verifies that:\\n - Correct number of croaks are produced\\n - Each croak is on a new line\\n - Output format matches expectations\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 97, + line2: 105, + lines_remove: + ' """\n Test frog\'s croaking behavior.\n \n Verifies that:\n - Correct number of croaks are produced\n - Each croak is on a new line\n - Output format matches expectations\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01KqLh5gkGZEMkXzRjdQWcev", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01GtAM91bdJY74XQRBhpqh8K", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Verify the existence and basic structure of AlternativeFrog class.\\n \\n This is a structural test to ensure the alternative implementation\\n maintains the expected interface.\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 115, + line2: 121, + lines_remove: + ' """\n Verify the existence and basic structure of AlternativeFrog class.\n \n This is a structural test to ensure the alternative implementation\n maintains the expected interface.\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01GtAM91bdJY74XQRBhpqh8K", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01FrfBzW3RjxL3okQUqRiyWf", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "old_str": " \\"\\"\\"\\n Test the standalone jumping function\'s output.\\n \\n Verifies the exact string output of the function, including punctuation\\n and spacing.\\n \\"\\"\\"", "replacement": "", "multiple": false}', + name: "update_textdoc", + }, + 
type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 123, + line2: 129, + lines_remove: + ' """\n Test the standalone jumping function\'s output.\n \n Verifies the exact string output of the function, including punctuation\n and spacing.\n """\n', + lines_add: "\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01FrfBzW3RjxL3okQUqRiyWf", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nNow all docstrings have been removed while preserving the test functionality. Here's a summary of what the test file contains:\n\n1. Fixtures:\n- basic_frog: Creates a standard frog at (5, 5) with velocity (1, 1)\n- pond_dimensions: Provides standard 10x10 pond dimensions\n\n2. Test Classes:\n- TestBoundaryBehavior: Tests boundary interactions with parametrized tests\n- TestMovement: Tests movement mechanics and position updates\n\n3. Standalone Tests:\n- test_frog_initialization: Basic initialization test\n- test_croak_output: Tests croaking functionality\n- test_alternative_frog_existence: Tests AlternativeFrog interface\n- test_standalone_jumping_function: Tests standalone function\n\nThe tests remain well-structured and maintain good coverage of the frog module's functionality, just without the documentation strings. Would you like me to show you the final version of the file or make any additional changes?", + finish_reason: "stop", + }, + { + ftm_role: "user", + ftm_content: "Great! Now remove all comments using regex\n", + checkpoints: [ + { + workspace_folder: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation", + commit_hash: "f1234facf0fd15104484f73c8e4cebc929a5d4d5", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "I'll use update_textdoc_regex to remove all comments from the test file.", + ftm_tool_calls: [ + { + id: "toolu_01LHPjgzJ8SEuczfm6Av7qfv", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "pattern": "\\\\s*#.*$", "replacement": "", "multiple": true}', + name: "update_textdoc_regex", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01LHPjgzJ8SEuczfm6Av7qfv", + ftm_content: + 'No replacement was performed, `pattern` \n```\n\\s*#.*$\n```\ndid not appear verbatim in "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py". 
Consider checking the file content using `cat()`', + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nLet me check the file content to see the exact format of comments:", + ftm_tool_calls: [ + { + id: "toolu_019iakkKqUjKP73EmEgVhCkZ", + function: { + arguments: + '{"paths": "tests/emergency_frog_situation/test_frog.py", "skeleton": false}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_019iakkKqUjKP73EmEgVhCkZ", + ftm_content: + "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "test_frog.py", + file_content: + '\n\nimport pytest\nimport numpy as np\nfrom frog import Frog, DT, AlternativeFrog, standalone_jumping_function\nimport io\nimport sys\n\n\n@pytest.fixture\ndef basic_frog():\n\n return Frog(5.0, 5.0, 1.0, 1.0)\n\n\n@pytest.fixture\ndef pond_dimensions():\n\n return (10.0, 10.0)\n\n\ndef test_frog_initialization():\n\n x, y = 1.0, 2.0\n vx, vy = 3.0, 4.0\n frog = Frog(x, y, vx, vy)\n \n assert frog.x == x, f"Expected x-position to be {x}"\n assert frog.y == y, f"Expected y-position to be {y}"\n assert frog.vx == vx, f"Expected x-velocity to be {vx}"\n assert frog.vy == vy, f"Expected y-velocity to be {vy}"\n\n\nclass TestBoundaryBehavior:\n\n\n @pytest.mark.parametrize("test_case", [\n # (starting_pos, starting_vel, expected_vel, description)\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n ((11.0, 5.0), (2.0, 0.0), (-2.0, 0.0), "right boundary"),\n ((5.0, -1.0), (0.0, -2.0), (0.0, 2.0), "bottom boundary"),\n ((5.0, 11.0), (0.0, 2.0), (0.0, -2.0), "top boundary")\n ])\n def test_boundary_bouncing(self, test_case, pond_dimensions):\n\n pos, vel, expected, boundary = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.bounce_off_banks(*pond_dimensions)\n \n assert (frog.vx, frog.vy) == expected, \\\n f"Incorrect velocity after bouncing off {boundary}"\n\n def test_no_bounce_within_bounds(self, basic_frog, pond_dimensions):\n\n original_vx, original_vy = basic_frog.vx, basic_frog.vy\n \n basic_frog.bounce_off_banks(*pond_dimensions)\n \n assert (basic_frog.vx, basic_frog.vy) == (original_vx, original_vy), \\\n "Velocity should not change when frog is within bounds"\n\n\nclass TestMovement:\n\n\n def test_normal_jump(self, basic_frog, pond_dimensions):\n\n initial_x, initial_y = basic_frog.x, basic_frog.y\n expected_x = initial_x + basic_frog.vx * DT\n expected_y = initial_y + basic_frog.vy * DT\n \n basic_frog.jump(*pond_dimensions)\n \n assert np.isclose(basic_frog.x, expected_x), \\\n "Incorrect x-position after jump"\n assert np.isclose(basic_frog.y, expected_y), \\\n "Incorrect y-position after jump"\n\n @pytest.mark.parametrize("test_case", [\n # (pos, vel, expected_pos, description)\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n ((0.1, 0.1), (-10.0, -10.0), (0.0, 0.0), "lower bounds")\n ])\n def test_boundary_clipping(self, test_case, pond_dimensions):\n\n pos, vel, expected, bounds = test_case\n frog = Frog(pos[0], pos[1], vel[0], vel[1])\n \n frog.jump(*pond_dimensions)\n \n assert (frog.x, frog.y) == expected, \\\n f"Incorrect position clipping at {bounds}"\n\n\ndef test_croak_output():\n\n frog = Frog(0, 0, 0, 0)\n n_croaks = 3\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n try:\n frog.croak(n_croaks)\n output = captured_output.getvalue().strip()\n expected_output = "croak\\n" * n_croaks\n 
finally:\n sys.stdout = sys.__stdout__\n \n assert output == expected_output.strip(), \\\n f"Expected {n_croaks} croaks, got: {output}"\n\n\ndef test_alternative_frog_existence():\n\n alt_frog = AlternativeFrog()\n assert hasattr(alt_frog, \'alternative_jump\'), \\\n "AlternativeFrog missing alternative_jump method"\n alt_frog.alternative_jump() # Should not raise any exceptions\n\n\ndef test_standalone_jumping_function():\n\n captured_output = io.StringIO()\n sys.stdout = captured_output\n \n try:\n standalone_jumping_function()\n output = captured_output.getvalue().strip()\n finally:\n sys.stdout = sys.__stdout__\n \n expected = "I\'m a frog! Jump! Jump! Really!"\n assert output == expected, \\\n f"Unexpected output: {output}"\n', + line1: 1, + line2: 134, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "I see the comments now. Let me remove them using regex that matches both inline comments and comment lines:", + ftm_tool_calls: [ + { + id: "toolu_01PXY5nTtbMuiA4JAjN5TMtX", + function: { + arguments: + '{"path": "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", "pattern": "\\\\s*#.*(\\\\n|$)", "replacement": "\\\\n", "multiple": true}', + name: "update_textdoc_regex", + }, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 37, + line2: 40, + lines_remove: + ' @pytest.mark.parametrize("test_case", [\n # (starting_pos, starting_vel, expected_vel, description)\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n', + lines_add: + ' @pytest.mark.parametrize("test_case", [\\n ((-1.0, 5.0), (-2.0, 0.0), (2.0, 0.0), "left boundary"),\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 80, + line2: 83, + lines_remove: + ' @pytest.mark.parametrize("test_case", [\n # (pos, vel, expected_pos, description)\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n', + lines_add: + ' @pytest.mark.parametrize("test_case", [\\n ((9.9, 9.9), (10.0, 10.0), (10.0, 10.0), "upper bounds"),\n', + file_name_rename: null, + application_details: "", + }, + { + file_name: + "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py", + file_action: "edit", + line1: 119, + line2: 121, + lines_remove: + " alt_frog.alternative_jump() # Should not raise any exceptions\n\n", + lines_add: " alt_frog.alternative_jump()\\n\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01PXY5nTtbMuiA4JAjN5TMtX", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nGreat! All comments have been removed from the test file. The functionality remains exactly the same, but now without any inline comments or comment lines. 
The test file is now cleaner and more concise.\n\nWould you like me to show you the final version of the file or make any other changes?", + finish_reason: "stop", + }, +].map((message, index) => { + return { + ftm_belongs_to_ft_id: "test", + ftm_num: index, + ftm_alt: 100, + ftm_prev_alt: 100, + ftm_created_ts: Date.now(), + ftm_call_id: "", + ...message, + }; +}); diff --git a/refact-agent/gui/src/__fixtures__/markdown-issue.ts b/refact-agent/gui/src/__fixtures__/markdown-issue.ts index 3e53b9199..42fa4ef79 100644 --- a/refact-agent/gui/src/__fixtures__/markdown-issue.ts +++ b/refact-agent/gui/src/__fixtures__/markdown-issue.ts @@ -1,482 +1,475 @@ -import type { ChatThread } from "../features/Chat/Thread"; +import type { BaseMessage } from "../services/refact/types"; -export const MARKDOWN_ISSUE: ChatThread = { - id: "1e41a050-9846-40a3-9d20-691f8c215920", - messages: [ - { - ftm_role: "system", - ftm_content: - "[mode3] You are Refact Agent, an autonomous bot for coding tasks.\n\nCore Principles\n1. Use knowledge()\n - Always use knowledge() first when you encounter an agentic (complex) task.\n - This tool can access external data, including successful \"trajectories\" (examples of past solutions).\n - External database records begin with the icon \"🗃️\" followed by a record identifier.\n - Use these records to help solve your tasks by analogy.\n2. Use locate() with the Full Problem Statement\n - Provide the entire user request in the problem_statement argument to avoid losing any details (\"telephone game\" effect).\n - Include user's emotional stance, code snippets, formatting, instructions—everything word-for-word.\n - Only omit parts of the user's request if they are unrelated to the final solution.\n - Avoid using locate() if the problem is quite simple and can be solved without extensive project analysis.\n3. Execute Changes and Validate\n - When a solution requires file modifications, use the appropriate *_textdoc() tools.\n - After making changes, perform a validation step by reviewing modified files using cat() or similar tools.\n - Check for available build tools (like cmdline_cargo_check, cmdline_cargo_build, etc.) and use them to validate changes.\n - Ensure all changes are complete and consistent with the project's standards.\n - If build validation fails or other issues are found, collect additional context and revise the changes.\n\nAnswering Strategy\n1. If the user’s question is unrelated to the project\n - Answer directly without using any special calls.\n2. If the user’s question is related to the project\n - First, call knowledge() for relevant information and best practices.\n3. Making Changes\n - If a solution requires file changes, use `*_textdoc()` tools.\n - It's a good practice to call cat() to track changes for changed files.\n\nImportant Notes\n1. Parallel Exploration\n - When you explore different ideas, use multiple parallel methods.\n2. Project-Related Questions\n - For any project question, always call knowledge() before taking any action.\n3. 
Knowledge Building (Automatic)\n - After completing any significant task, AUTOMATICALLY use create_knowledge() without waiting for user prompting:\n * Important code patterns and their usage locations\n * Key relationships between classes/functions\n * File dependencies and project structure insights\n * Successful solution patterns for future reference\n - Proactively create knowledge entries whenever you:\n * Solve a problem or implement a feature\n * Discover patterns in the codebase\n * Learn something about project structure or dependencies\n * Fix a bug or identify potential issues\n * Analyze placeholders, test data, or configuration files\n - Consider each interaction an opportunity to build the knowledge base - don't wait for explicit instructions\n4. Continuous Learning\n - Treat every interaction as a learning opportunity\n - When you encounter interesting code patterns, project structures, or implementation details, document them\n - If you analyze placeholders, test data, or configuration files, record your findings\n - Don't wait for the user to ask you to remember - proactively build the knowledge base\n\nWhen running on user's laptop, you most likely have the shell() tool. It's for one-time dependency installations, or doing whatever\nuser is asking you to do. Tools the user can set up are better, because they don't require confimations when running on a laptop.\nWhen doing something for the project using shell() tool, offer the user to make a cmdline_* tool after you have successfully run\nthe shell() call. But double-check that it doesn't already exist, and it is actually typical for this kind of project. You can offer\nthis by writing:\n\n🧩SETTINGS:cmdline_cargo_check\n\nfrom a new line, that will open (when clicked) a wizard that creates `cargo check` (in this example) command line tool.\n\nIn a similar way, service_* tools work. The difference is cmdline_* is designed for non-interactive blocking commands that immediately\nreturn text in stdout/stderr, and service_* is designed for blocking background commands, such as hypercorn server that runs forever until you hit Ctrl+C.\nHere is another example:\n\n🧩SETTINGS:service_hypercorn\n\n\nYou might receive additional instructions that start with 💿. Those are not coming from the user, they are programmed to help you operate\nwell and they are always in English. Answer in the language the user has asked the question.\n\n\n- below general information about the current project -\n\nThe current IDE workspace has these project directories:\n/home/fupfv/git/benchmark1_0701\n\nThe active IDE file is:\n/home/fupfv/.config/Code/User/settings.json\nThere's no version control detected, complain to user if they want to use anything git/hg/svn/etc.\n\n\n\nIMPORTANT: Knowledge creation is not optional. After EVERY significant task, AUTOMATICALLY create a knowledge entry using create_knowledge() without waiting for user prompting. This is a core part of your workflow.\n", - }, - { - ftm_role: "user", - ftm_content: "Can you create comprehensive documentation?\n", - checkpoints: [ - { - workspace_folder: "/home/fupfv/git/benchmark1_0701", - commit_hash: "7e71dfea56b596c563725dd9772d1440185ef102", - }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "I'd be happy to help create comprehensive documentation for your project. 
To provide the most useful documentation, I need to understand what project you're working with and what kind of documentation you need.\n\nLet me first explore the project structure to get a better understanding of what we're working with.", - ftm_tool_calls: [ - { - id: "toolu_01JbWarAwzjMyV6azDkd5skX", - function: { - arguments: '{"use_ast": true}', - name: "tree", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01JbWarAwzjMyV6azDkd5skX", - ftm_content: - "/\n home/\n fupfv/\n git/\n benchmark1_0701/\n 12.zip\n LICENSE\n README.md\n VISUALIZATION.md\n example_new_file.py\n grafana-dashboard.json\n llm_load_test.zip\n llm_load_test/\n README.md\n requirements.txt\n src/\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.py\n load_test_report_20240811_002319.csv\n load_test_report_20240811_002319.json\n make_scripts_executable.sh\n requirements.txt\n results/\n run_20250129_152629/\n load_test_report_2025-01-29T152630.827620.csv\n load_test_report_2025-01-29T152630.827620.json\n load_test_report_2025-01-29T152636.621391.csv\n load_test_report_2025-01-29T152636.621391.json\n load_test_report_2025-01-29T152642.333384.csv\n load_test_report_2025-01-29T152642.333384.json\n load_test_report_2025-01-29T152648.032846.csv\n load_test_report_2025-01-29T152648.032846.json\n load_test_report_2025-01-29T152653.733025.csv\n load_test_report_2025-01-29T152653.733025.json\n load_test_report_2025-01-29T152659.442419.csv\n load_test_report_2025-01-29T152659.442419.json\n load_test_report_20250129_152704.csv\n load_test_report_20250129_152704.json\n run_20250129_152807/\n load_test_report_2025-01-29T152808.476840.csv\n load_test_report_2025-01-29T152808.476840.json\n load_test_report_2025-01-29T152814.290370.csv\n load_test_report_2025-01-29T152814.290370.json\n load_test_report_2025-01-29T152819.988992.csv\n load_test_report_2025-01-29T152819.988992.json\n load_test_report_2025-01-29T152825.712261.csv\n load_test_report_2025-01-29T152825.712261.json\n load_test_report_2025-01-29T152831.461047.csv\n load_test_report_2025-01-29T152831.461047.json\n load_test_report_2025-01-29T152837.233726.csv\n load_test_report_2025-01-29T152837.233726.json\n load_test_report_20250129_152842.csv\n load_test_report_20250129_152842.json\n run_20250129_152930/\n load_test_report_2025-01-29T153031.809694.csv\n load_test_report_2025-01-29T153031.809694.json\n load_test_report_2025-01-29T153137.610641.csv\n load_test_report_2025-01-29T153137.610641.json\n load_test_report_2025-01-29T153243.818603.csv\n load_test_report_2025-01-29T153243.818603.json\n load_test_report_2025-01-29T153349.887918.csv\n load_test_report_2025-01-29T153349.887918.json\n load_test_report_2025-01-29T153504.701174.csv\n load_test_report_2025-01-29T153504.701174.json\n load_test_report_2025-01-29T153615.800362.csv\n load_test_report_2025-01-29T153615.800362.json\n load_test_report_20250129_153620.csv\n load_test_report_20250129_153620.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n src/\n __pycache__/\n llm_test_logger.cpython-310.pyc\n load_test.cpython-310.pyc\n compare_runs.py\n dashboard_generator.py\n from transformers import AutoTokenizer.py\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.log\n load_test.py\n load_test_aggregator.py\n load_test_tgi.py\n load_test_vllm.py\n qwen_run_20250128_193328.zip\n qwen_run_20250129_131310.zip\n results/\n 
run_20250129_131310/\n load_test_report_2025-01-29T131340.582736.csv\n load_test_report_2025-01-29T131340.582736.json\n load_test_report_2025-01-29T131416.770529.csv\n load_test_report_2025-01-29T131416.770529.json\n load_test_report_2025-01-29T131452.904227.csv\n load_test_report_2025-01-29T131452.904227.json\n load_test_report_2025-01-29T131529.208363.csv\n load_test_report_2025-01-29T131529.208363.json\n load_test_report_2025-01-29T131612.332502.csv\n load_test_report_2025-01-29T131612.332502.json\n load_test_report_2025-01-29T131654.024454.csv\n load_test_report_2025-01-29T131654.024454.json\n load_test_report_20250129_131659.csv\n load_test_report_20250129_131659.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_131828/\n load_test_report_2025-01-29T131859.729718.csv\n load_test_report_2025-01-29T131859.729718.json\n load_test_report_2025-01-29T131935.556939.csv\n load_test_report_2025-01-29T131935.556939.json\n load_test_report_2025-01-29T132011.817203.csv\n load_test_report_2025-01-29T132011.817203.json\n load_test_report_2025-01-29T132047.948690.csv\n load_test_report_2025-01-29T132047.948690.json\n load_test_report_2025-01-29T132140.620425.csv\n load_test_report_2025-01-29T132140.620425.json\n load_test_report_2025-01-29T132237.254055.csv\n load_test_report_2025-01-29T132237.254055.json\n load_test_report_20250129_132242.csv\n load_test_report_20250129_132242.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_132842/\n load_test_report_2025-01-29T132913.096074.csv\n load_test_report_2025-01-29T132913.096074.json\n load_test_report_2025-01-29T132949.286127.csv\n load_test_report_2025-01-29T132949.286127.json\n load_test_report_2025-01-29T133025.273897.csv\n load_test_report_2025-01-29T133025.273897.json\n load_test_report_2025-01-29T133102.000762.csv\n load_test_report_2025-01-29T133102.000762.json\n load_test_report_2025-01-29T133154.340248.csv\n load_test_report_2025-01-29T133154.340248.json\n load_test_report_2025-01-29T133257.783732.csv\n load_test_report_2025-01-29T133257.783732.json\n load_test_report_20250129_133302.csv\n load_test_report_20250129_133302.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_133711/\n load_test_report_2025-01-29T133742.239356.csv\n load_test_report_2025-01-29T133742.239356.json\n load_test_report_2025-01-29T133818.175709.csv\n load_test_report_2025-01-29T133818.175709.json\n load_test_report_2025-01-29T133853.789246.csv\n load_test_report_2025-01-29T133853.789246.json\n load_test_report_2025-01-29T133929.633962.csv\n load_test_report_2025-01-29T133929.633962.json\n load_test_report_2025-01-29T134013.341083.csv\n load_test_report_2025-01-29T134013.341083.json\n load_test_report_2025-01-29T134101.336503.csv\n load_test_report_2025-01-29T134101.336503.json\n load_test_report_20250129_134106.csv\n load_test_report_20250129_134106.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_134818/\n load_test_report_2025-01-29T134919.598778.csv\n load_test_report_2025-01-29T134919.598778.json\n load_test_report_2025-01-29T135025.745361.csv\n 
load_test_report_2025-01-29T135025.745361.json\n load_test_report_2025-01-29T135131.347054.csv\n load_test_report_2025-01-29T135131.347054.json\n load_test_report_2025-01-29T135237.241605.csv\n load_test_report_2025-01-29T135237.241605.json\n load_test_report_2025-01-29T135352.526234.csv\n load_test_report_2025-01-29T135352.526234.json\n load_test_report_2025-01-29T135509.169860.csv\n load_test_report_2025-01-29T135509.169860.json\n load_test_report_20250129_135514.csv\n load_test_report_20250129_135514.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_135810/\n load_test_report_2025-01-29T135911.302460.csv\n load_test_report_2025-01-29T135911.302460.json\n load_test_report_2025-01-29T140017.766295.csv\n load_test_report_2025-01-29T140017.766295.json\n load_test_report_2025-01-29T140123.329253.csv\n load_test_report_2025-01-29T140123.329253.json\n load_test_report_2025-01-29T140229.087510.csv\n load_test_report_2025-01-29T140229.087510.json\n load_test_report_2025-01-29T140354.254251.csv\n load_test_report_2025-01-29T140354.254251.json\n load_test_report_2025-01-29T140522.596391.csv\n load_test_report_2025-01-29T140522.596391.json\n load_test_report_20250129_140527.csv\n load_test_report_20250129_140527.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_140726/\n load_test_report_2025-01-29T140828.249744.csv\n load_test_report_2025-01-29T140828.249744.json\n load_test_report_2025-01-29T140935.241087.csv\n load_test_report_2025-01-29T140935.241087.json\n load_test_report_2025-01-29T141041.737827.csv\n load_test_report_2025-01-29T141041.737827.json\n load_test_report_2025-01-29T141148.575547.csv\n load_test_report_2025-01-29T141148.575547.json\n load_test_report_2025-01-29T141257.979330.csv\n load_test_report_2025-01-29T141257.979330.json\n load_test_report_2025-01-29T141407.813467.csv\n load_test_report_2025-01-29T141407.813467.json\n load_test_report_2025-01-29T141517.031485.csv\n load_test_report_2025-01-29T141517.031485.json\n load_test_report_2025-01-29T141626.812125.csv\n load_test_report_2025-01-29T141626.812125.json\n load_test_report_2025-01-29T141738.980843.csv\n load_test_report_2025-01-29T141738.980843.json\n load_test_report_2025-01-29T141852.372524.csv\n load_test_report_2025-01-29T141852.372524.json\n load_test_report_2025-01-29T142006.313659.csv\n load_test_report_2025-01-29T142006.313659.json\n load_test_report_2025-01-29T142122.053494.csv\n load_test_report_2025-01-29T142122.053494.json\n load_test_report_20250129_142127.csv\n load_test_report_20250129_142127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_142324/\n load_test_report_2025-01-29T142426.095040.csv\n load_test_report_2025-01-29T142426.095040.json\n load_test_report_2025-01-29T142532.101781.csv\n load_test_report_2025-01-29T142532.101781.json\n load_test_report_2025-01-29T142638.130364.csv\n load_test_report_2025-01-29T142638.130364.json\n load_test_report_2025-01-29T142744.373122.csv\n load_test_report_2025-01-29T142744.373122.json\n 
load_test_report_2025-01-29T142851.436595.csv\n load_test_report_2025-01-29T142851.436595.json\n load_test_report_2025-01-29T142958.649875.csv\n load_test_report_2025-01-29T142958.649875.json\n load_test_report_2025-01-29T143105.820377.csv\n load_test_report_2025-01-29T143105.820377.json\n load_test_report_2025-01-29T143213.483254.csv\n load_test_report_2025-01-29T143213.483254.json\n load_test_report_2025-01-29T143322.075349.csv\n load_test_report_2025-01-29T143322.075349.json\n load_test_report_2025-01-29T143431.160350.csv\n load_test_report_2025-01-29T143431.160350.json\n load_test_report_2025-01-29T143540.792112.csv\n load_test_report_2025-01-29T143540.792112.json\n load_test_report_2025-01-29T143651.193158.csv\n load_test_report_2025-01-29T143651.193158.json\n load_test_report_20250129_143656.csv\n load_test_report_20250129_143656.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_144231/\n load_test_report_2025-01-29T144333.225207.csv\n load_test_report_2025-01-29T144333.225207.json\n load_test_report_2025-01-29T144441.892228.csv\n load_test_report_2025-01-29T144441.892228.json\n load_test_report_2025-01-29T144548.216391.csv\n load_test_report_2025-01-29T144548.216391.json\n load_test_report_2025-01-29T144654.207507.csv\n load_test_report_2025-01-29T144654.207507.json\n load_test_report_2025-01-29T144801.887104.csv\n load_test_report_2025-01-29T144801.887104.json\n load_test_report_2025-01-29T144907.892024.csv\n load_test_report_2025-01-29T144907.892024.json\n load_test_report_2025-01-29T145015.606306.csv\n load_test_report_2025-01-29T145015.606306.json\n load_test_report_2025-01-29T145124.318365.csv\n load_test_report_2025-01-29T145124.318365.json\n load_test_report_2025-01-29T145232.316758.csv\n load_test_report_2025-01-29T145232.316758.json\n load_test_report_2025-01-29T145338.561407.csv\n load_test_report_2025-01-29T145338.561407.json\n load_test_report_2025-01-29T145447.340833.csv\n load_test_report_2025-01-29T145447.340833.json\n load_test_report_2025-01-29T145556.603603.csv\n load_test_report_2025-01-29T145556.603603.json\n load_test_report_20250129_145601.csv\n load_test_report_20250129_145601.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_145926/\n load_test_report_2025-01-29T150027.790900.csv\n load_test_report_2025-01-29T150027.790900.json\n load_test_report_2025-01-29T150134.652497.csv\n load_test_report_2025-01-29T150134.652497.json\n load_test_report_2025-01-29T150242.312479.csv\n load_test_report_2025-01-29T150242.312479.json\n load_test_report_2025-01-29T150348.489497.csv\n load_test_report_2025-01-29T150348.489497.json\n load_test_report_2025-01-29T150454.976232.csv\n load_test_report_2025-01-29T150454.976232.json\n load_test_report_2025-01-29T150600.673114.csv\n load_test_report_2025-01-29T150600.673114.json\n load_test_report_2025-01-29T150708.380006.csv\n load_test_report_2025-01-29T150708.380006.json\n load_test_report_2025-01-29T150814.575034.csv\n load_test_report_2025-01-29T150814.575034.json\n 
load_test_report_2025-01-29T150923.544283.csv\n load_test_report_2025-01-29T150923.544283.json\n load_test_report_2025-01-29T151030.283486.csv\n load_test_report_2025-01-29T151030.283486.json\n load_test_report_2025-01-29T151138.589944.csv\n load_test_report_2025-01-29T151138.589944.json\n load_test_report_2025-01-29T151248.730621.csv\n load_test_report_2025-01-29T151248.730621.json\n load_test_report_20250129_151253.csv\n load_test_report_20250129_151253.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_160612/\n load_test_report_2025-01-29T160713.432216.csv\n load_test_report_2025-01-29T160713.432216.json\n load_test_report_2025-01-29T160819.907680.csv\n load_test_report_2025-01-29T160819.907680.json\n load_test_report_2025-01-29T160926.784918.csv\n load_test_report_2025-01-29T160926.784918.json\n load_test_report_2025-01-29T161033.828339.csv\n load_test_report_2025-01-29T161033.828339.json\n load_test_report_2025-01-29T161153.205639.csv\n load_test_report_2025-01-29T161153.205639.json\n load_test_report_2025-01-29T161315.237414.csv\n load_test_report_2025-01-29T161315.237414.json\n load_test_report_20250129_161320.csv\n load_test_report_20250129_161320.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_161925/\n load_test_report_2025-01-29T162025.734114.csv\n load_test_report_2025-01-29T162025.734114.json\n load_test_report_2025-01-29T162131.524371.csv\n load_test_report_2025-01-29T162131.524371.json\n load_test_report_2025-01-29T162237.758517.csv\n load_test_report_2025-01-29T162237.758517.json\n load_test_report_2025-01-29T162344.818406.csv\n load_test_report_2025-01-29T162344.818406.json\n load_test_report_2025-01-29T162507.384913.csv\n load_test_report_2025-01-29T162507.384913.json\n load_test_report_2025-01-29T162613.335853.csv\n load_test_report_2025-01-29T162613.335853.json\n load_test_report_20250129_162618.csv\n load_test_report_20250129_162618.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_162732/\n load_test_report_2025-01-29T162834.272459.csv\n load_test_report_2025-01-29T162834.272459.json\n load_test_report_2025-01-29T162941.672408.csv\n load_test_report_2025-01-29T162941.672408.json\n load_test_report_2025-01-29T163048.857712.csv\n load_test_report_2025-01-29T163048.857712.json\n load_test_report_2025-01-29T163157.624546.csv\n load_test_report_2025-01-29T163157.624546.json\n load_test_report_2025-01-29T163306.370415.csv\n load_test_report_2025-01-29T163306.370415.json\n load_test_report_2025-01-29T163416.065472.csv\n load_test_report_2025-01-29T163416.065472.json\n load_test_report_2025-01-29T163524.604470.csv\n load_test_report_2025-01-29T163524.604470.json\n load_test_report_2025-01-29T163632.880248.csv\n load_test_report_2025-01-29T163632.880248.json\n load_test_report_2025-01-29T163745.002002.csv\n load_test_report_2025-01-29T163745.002002.json\n load_test_report_2025-01-29T163902.036068.csv\n load_test_report_2025-01-29T163902.036068.json\n load_test_report_2025-01-29T164009.453151.csv\n load_test_report_2025-01-29T164009.453151.json\n 
load_test_report_2025-01-29T164122.568066.csv\n load_test_report_2025-01-29T164122.568066.json\n load_test_report_20250129_164127.csv\n load_test_report_20250129_164127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_164620/\n load_test_report_2025-01-29T164721.700661.csv\n load_test_report_2025-01-29T164721.700661.json\n load_test_report_2025-01-29T164827.520353.csv\n load_test_report_2025-01-29T164827.520353.json\n load_test_report_2025-01-29T164933.310367.csv\n load_test_report_2025-01-29T164933.310367.json\n load_test_report_2025-01-29T165039.642351.csv\n load_test_report_2025-01-29T165039.642351.json\n load_test_report_2025-01-29T165154.098239.csv\n load_test_report_2025-01-29T165154.098239.json\n load_test_report_2025-01-29T165308.831481.csv\n load_test_report_2025-01-29T165308.831481.json\n load_test_report_20250129_165313.csv\n load_test_report_20250129_165313.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_165758/\n load_test_report_2025-01-29T165859.461686.csv\n load_test_report_2025-01-29T165859.461686.json\n load_test_report_2025-01-29T170005.472004.csv\n load_test_report_2025-01-29T170005.472004.json\n load_test_report_2025-01-29T170111.422122.csv\n load_test_report_2025-01-29T170111.422122.json\n load_test_report_2025-01-29T170217.557618.csv\n load_test_report_2025-01-29T170217.557618.json\n load_test_report_2025-01-29T170330.493971.csv\n load_test_report_2025-01-29T170330.493971.json\n load_test_report_2025-01-29T170447.558129.csv\n load_test_report_2025-01-29T170447.558129.json\n load_test_report_20250129_170452.csv\n load_test_report_20250129_170452.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_170950/\n load_test_report_2025-01-29T171051.361008.csv\n load_test_report_2025-01-29T171051.361008.json\n load_test_report_2025-01-29T171157.323565.csv\n load_test_report_2025-01-29T171157.323565.json\n load_test_report_2025-01-29T171303.299586.csv\n load_test_report_2025-01-29T171303.299586.json\n load_test_report_2025-01-29T171409.108765.csv\n load_test_report_2025-01-29T171409.108765.json\n load_test_report_2025-01-29T171514.861147.csv\n load_test_report_2025-01-29T171514.861147.json\n load_test_report_2025-01-29T171620.615624.csv\n load_test_report_2025-01-29T171620.615624.json\n load_test_report_2025-01-29T171726.893447.csv\n load_test_report_2025-01-29T171726.893447.json\n load_test_report_2025-01-29T171833.044767.csv\n load_test_report_2025-01-29T171833.044767.json\n load_test_report_2025-01-29T171939.151837.csv\n load_test_report_2025-01-29T171939.151837.json\n load_test_report_2025-01-29T172045.358719.csv\n load_test_report_2025-01-29T172045.358719.json\n load_test_report_2025-01-29T172151.647824.csv\n load_test_report_2025-01-29T172151.647824.json\n load_test_report_2025-01-29T172257.931381.csv\n load_test_report_2025-01-29T172257.931381.json\n load_test_report_2025-01-29T172404.993732.csv\n load_test_report_2025-01-29T172404.993732.json\n load_test_report_2025-01-29T172512.469972.csv\n load_test_report_2025-01-29T172512.469972.json\n 
load_test_report_2025-01-29T172619.912159.csv\n load_test_report_2025-01-29T172619.912159.json\n load_test_report_2025-01-29T172727.520335.csv\n load_test_report_2025-01-29T172727.520335.json\n load_test_report_2025-01-29T172836.287202.csv\n load_test_report_2025-01-29T172836.287202.json\n load_test_report_2025-01-29T172945.243054.csv\n load_test_report_2025-01-29T172945.243054.json\n load_test_report_2025-01-29T173054.878245.csv\n load_test_report_2025-01-29T173054.878245.json\n load_test_report_2025-01-29T173205.270695.csv\n load_test_report_2025-01-29T173205.270695.json\n load_test_report_2025-01-29T173319.135777.csv\n load_test_report_2025-01-29T173319.135777.json\n load_test_report_2025-01-29T173434.082094.csv\n load_test_report_2025-01-29T173434.082094.json\n load_test_report_2025-01-29T173550.513858.csv\n load_test_report_2025-01-29T173550.513858.json\n load_test_report_2025-01-29T173708.906195.csv\n load_test_report_2025-01-29T173708.906195.json\n load_test_report_20250129_173713.csv\n load_test_report_20250129_173713.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_174215/\n load_test_report_2025-01-29T174316.520550.csv\n load_test_report_2025-01-29T174316.520550.json\n load_test_report_2025-01-29T174422.384594.csv\n load_test_report_2025-01-29T174422.384594.json\n load_test_report_2025-01-29T174528.291764.csv\n load_test_report_2025-01-29T174528.291764.json\n load_test_report_2025-01-29T174633.925509.csv\n load_test_report_2025-01-29T174633.925509.json\n load_test_report_2025-01-29T174740.096886.csv\n load_test_report_2025-01-29T174740.096886.json\n load_test_report_2025-01-29T174845.697959.csv\n load_test_report_2025-01-29T174845.697959.json\n load_test_report_2025-01-29T174952.084484.csv\n load_test_report_2025-01-29T174952.084484.json\n load_test_report_2025-01-29T175058.845237.csv\n load_test_report_2025-01-29T175058.845237.json\n load_test_report_2025-01-29T175205.494738.csv\n load_test_report_2025-01-29T175205.494738.json\n load_test_report_2025-01-29T175312.831611.csv\n load_test_report_2025-01-29T175312.831611.json\n load_test_report_2025-01-29T175419.902976.csv\n load_test_report_2025-01-29T175419.902976.json\n load_test_report_2025-01-29T175527.241889.csv\n load_test_report_2025-01-29T175527.241889.json\n load_test_report_2025-01-29T175635.835204.csv\n load_test_report_2025-01-29T175635.835204.json\n load_test_report_2025-01-29T175744.448069.csv\n load_test_report_2025-01-29T175744.448069.json\n load_test_report_2025-01-29T175853.905293.csv\n load_test_report_2025-01-29T175853.905293.json\n load_test_report_2025-01-29T180003.565666.csv\n load_test_report_2025-01-29T180003.565666.json\n load_test_report_2025-01-29T180115.557518.csv\n load_test_report_2025-01-29T180115.557518.json\n load_test_report_2025-01-29T180228.466492.csv\n load_test_report_2025-01-29T180228.466492.json\n load_test_report_2025-01-29T180342.419821.csv\n load_test_report_2025-01-29T180342.419821.json\n 
load_test_report_2025-01-29T180457.796778.csv\n load_test_report_2025-01-29T180457.796778.json\n load_test_report_2025-01-29T180620.304565.csv\n load_test_report_2025-01-29T180620.304565.json\n load_test_report_2025-01-29T180746.057385.csv\n load_test_report_2025-01-29T180746.057385.json\n load_test_report_2025-01-29T180914.893498.csv\n load_test_report_2025-01-29T180914.893498.json\n load_test_report_2025-01-29T181046.064760.csv\n load_test_report_2025-01-29T181046.064760.json\n load_test_report_20250129_181051.csv\n load_test_report_20250129_181051.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n starcoder2_run_20250129_123907.zip\n starcoder_run_20250128_20.zip\n starcoder_run_20250129_131828.zip\n test_single_request.py\n visualize_results.py\n temp_file_renamed.txt\n test_data.txt", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nNow, let me examine the README.md file to understand the project's purpose and structure:", - ftm_tool_calls: [ - { - id: "toolu_01DzbsASaCkQXY2fmUYZjXbj", - function: { - arguments: '{"paths": "README.md", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01DzbsASaCkQXY2fmUYZjXbj", - ftm_content: - 'Problems:\nThe path "README.md" is ambiguous. It could be interpreted as:\n/home/fupfv/git/benchmark1_0701/README.md\n/home/fupfv/git/benchmark1_0701/llm_load_test/README.md\n', - }, - { - ftm_role: "assistant", - ftm_content: "\n\nLet me check both README files:", - ftm_tool_calls: [ - { - id: "toolu_01LUEfJUST74ghMbHUmuByr5", - function: { - arguments: - '{"paths": "/home/fupfv/git/benchmark1_0701/README.md", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01LUEfJUST74ghMbHUmuByr5", - ftm_content: "Paths found:\n/home/fupfv/git/benchmark1_0701/README.md\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "benchmark1_0701/README.md", - file_content: - "# LLM Load Testing Tool\n\nA tool for load testing LLM inference servers using Locust, with comprehensive visualization capabilities.\n\n## Setup\n\n1. Install dependencies:\n```bash\npip install -r requirements.txt\n```\n\n2. Run the test:\n```bash\npython src/llm_load_test_runner.py\n```\n\n3. 
Make visualization scripts executable (optional):\n```bash\nbash make_scripts_executable.sh\n```\n\n## Configuration\n\nThe test runs with these default parameters:\n- Users: 1, 2, 50\n- Output tokens: 15, 30\n- Run time: 60s per test\n- Prompt tokens: 4046\n- Temperature: 1.0\n\n## Reports\n\nReports are generated in the `results/run_YYYYMMDD_HHMMSS/` directory:\n- Individual test results: `results_test_*.csv`\n- Summary reports: \n - `load_test_report_YYYYMMDD_HHMMSS.csv`\n - `load_test_report_YYYYMMDD_HHMMSS.json`\n\n## Example Report Format\n\nThe CSV report includes:\n- Response metrics (total latency, first token latency)\n- Number of tokens generated\n- Characters generated\n- Requests per second\n- Success/failure rates\n\n## Visualization Tools\n\nThe project includes several visualization tools to help analyze test results:\n\n### 1. Single Run Visualization\n\nGenerate comprehensive visualizations for a single test run:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\n### 2. Run Comparison\n\nCompare results across multiple test runs:\n\n```bash\npython src/compare_runs.py --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\n```\n\n### 3. Grafana Dashboard Generator\n\nCreate a Grafana dashboard configuration for real-time monitoring:\n\n```bash\npython src/dashboard_generator.py --output-file grafana-dashboard.json\n```\n\nFor detailed information about the visualization tools, see [VISUALIZATION.md](VISUALIZATION.md).\n", - line1: 1, - line2: 75, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "Let me check the VISUALIZATION.md file and the main source files to understand the project better:", - ftm_tool_calls: [ - { - id: "toolu_019uJg1SQ5evTU9bov6iS9hU", - function: { - arguments: - '{"paths": "/home/fupfv/git/benchmark1_0701/VISUALIZATION.md", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_019uJg1SQ5evTU9bov6iS9hU", - ftm_content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/VISUALIZATION.md\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "VISUALIZATION.md", - file_content: - "# LLM Benchmark Visualization Tools\n\nThis document describes the visualization tools available for analyzing the results of LLM load tests.\n\n## Available Visualization Tools\n\nThe project includes three main visualization tools:\n\n1. **visualize_results.py** - Generate visualizations for a single test run\n2. **compare_runs.py** - Compare results across multiple test runs\n3. **dashboard_generator.py** - Generate Grafana dashboard configurations for real-time monitoring\n\n## Prerequisites\n\nInstall the required dependencies:\n\n```bash\npip install pandas matplotlib seaborn numpy\n```\n\nFor Grafana dashboards, you'll need to have Grafana installed and configured.\n\n## 1. 
Visualize Results\n\nThe `visualize_results.py` script generates various charts and visualizations from a single test run.\n\n### Usage\n\n```bash\npython src/visualize_results.py --results-dir results/run_20250129_174215 --output-dir visualizations\n```\n\n### Parameters\n\n- `--results-dir`: Directory containing test results (default: results)\n- `--output-dir`: Directory to save visualizations (default: results_dir/visualizations)\n- `--run`: Specific run directory to analyze (default: analyze all runs)\n\n### Generated Visualizations\n\n- Latency by concurrency level\n- Throughput (QPS) by concurrency level\n- Latency by output token count\n- Heatmap of latency by concurrency and output tokens\n- Model comparison (if multiple models)\n- Run comparison (if multiple runs)\n- Latency distribution\n- Token generation speed\n- Summary report (markdown)\n\n## 2. Compare Runs\n\nThe `compare_runs.py` script compares results from different test runs to identify performance differences, regressions, or improvements.\n\n### Usage\n\n```bash\npython src/compare_runs.py --base-dir results --runs run_20250129_174215 run_20250129_170950 --output-dir comparisons\n```\n\n### Parameters\n\n- `--base-dir`: Base directory containing run directories (default: results)\n- `--runs`: Specific run directories to compare (default: all runs)\n- `--output-dir`: Directory to save comparison visualizations\n\n### Generated Comparisons\n\n- Latency metrics comparison across runs\n- Throughput comparison across runs\n- Performance by concurrency level across runs\n- Performance by output token count across runs\n- Heatmap comparisons\n- Summary table with key metrics\n- Improvement metrics (when comparing multiple runs)\n\n## 3. Dashboard Generator\n\nThe `dashboard_generator.py` script generates a Grafana dashboard configuration from test results for real-time monitoring.\n\n### Usage\n\n```bash\npython src/dashboard_generator.py --results-dir results --output-file grafana-dashboard.json\n```\n\n### Parameters\n\n- `--results-dir`: Directory containing test results (default: results)\n- `--output-file`: Path to save the dashboard JSON file\n\n### Dashboard Features\n\n- Real-time latency metrics\n- Throughput metrics\n- Token metrics\n- Token generation speed\n- Key statistics\n- Model comparison\n\n### Importing to Grafana\n\n1. Open Grafana in your web browser\n2. Navigate to Dashboards > Import\n3. Upload the generated JSON file or paste its contents\n4. Configure the data source if prompted\n5. Click Import\n\n## Example Workflow\n\n1. Run load tests with different configurations:\n ```bash\n python src/llm_load_test_runner.py\n ```\n\n2. Generate visualizations for a specific run:\n ```bash\n python src/visualize_results.py --results-dir results/run_20250129_174215\n ```\n\n3. Compare multiple runs:\n ```bash\n python src/compare_runs.py --runs run_20250129_174215 run_20250129_170950\n ```\n\n4. Generate a Grafana dashboard:\n ```bash\n python src/dashboard_generator.py --output-file grafana-dashboard.json\n ```\n\n## Tips for Effective Analysis\n\n1. **Consistent Test Parameters**: When comparing runs, ensure that the test parameters (prompt tokens, output tokens, etc.) are consistent.\n\n2. **Baseline Comparison**: Always include a baseline run when comparing performance improvements.\n\n3. **Isolate Variables**: When testing for specific improvements, change only one variable at a time.\n\n4. **Real-time Monitoring**: Use the Grafana dashboard for real-time monitoring during long-running tests.\n\n5. 
**Anomaly Detection**: Look for outliers in the latency distribution charts to identify potential issues.\n\n6. **Scaling Analysis**: Pay special attention to how performance metrics change with increasing concurrency levels.\n\n## Troubleshooting\n\n- **Missing Data**: If visualizations show missing data, check that the CSV files contain the expected columns.\n\n- **Pandas Errors**: Ensure you have the latest version of pandas installed (`pip install --upgrade pandas`).\n\n- **Matplotlib Issues**: If plots don't render correctly, try updating matplotlib (`pip install --upgrade matplotlib`).\n\n- **File Not Found**: Verify that the specified directories exist and contain the expected data files.\n\n- **Memory Errors**: For very large datasets, consider filtering the data or using a machine with more memory.\n", - line1: 1, - line2: 154, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "Now let me examine the main source files to understand how the tool works:", - ftm_tool_calls: [ - { - id: "toolu_01T48o7umn3FG5JVJWRgviUP", - function: { - arguments: - '{"paths": "/home/fupfv/git/benchmark1_0701/src/llm_load_test_runner.py", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01T48o7umn3FG5JVJWRgviUP", - ftm_content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/src/llm_load_test_runner.py\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "benchmark1_0701/src/llm_load_test_runner.py", - file_content: - 'import subprocess\nimport time\nimport csv\nimport json\nimport os\nimport sys\nfrom datetime import datetime\nfrom llm_test_logger import LLMTestLogger\n\nclass LLMLoadTest:\n def __init__(self, base_config, results_dir):\n self.base_config = base_config\n self.results_dir = results_dir\n self.results = []\n self.logger = LLMTestLogger(os.path.join(results_dir, "detailed_logs"))\n\n # Create the results directory if it doesn\'t exist\n os.makedirs(self.results_dir, exist_ok=True)\n\n def write_test_report(self, test_name, response_text, duration, exit_code, prompt_tokens, provider=None, model=None, generation_tokens=None, stream=None, temperature=None, logprobs=None, concurrency=None, time_to_first_token=None, latency_per_token=None, num_tokens=None, total_latency=None, num_requests=None, qps=None):\n timestamp = datetime.now().isoformat()\n report_data = {\n "Response received": response_text,\n "test_name": test_name,\n "duration": duration,\n "exit_code": exit_code,\n "Prompt Tokens": prompt_tokens,\n "Provider": provider,\n "Model": model,\n "Generation Tokens": generation_tokens,\n "Stream": stream,\n "Temperature": temperature,\n "Logprobs": logprobs,\n "Concurrency": concurrency,\n "Time To First Token": time_to_first_token,\n "Latency Per Token": latency_per_token,\n "Num Tokens": num_tokens,\n "Total Latency": total_latency,\n "Num Requests": num_requests,\n "Qps": qps,\n "_timestamp": timestamp\n }\n\n # Write JSON report\n json_report_path = os.path.join(self.results_dir, "load_test_report_" + timestamp.replace(":", "") + ".json")\n with open(json_report_path, "w") as f:\n json.dump([report_data], f, indent=2)\n\n # Write CSV report\n csv_report_path = os.path.join(self.results_dir, "load_test_report_" + timestamp.replace(":", "") + ".csv")\n with open(csv_report_path, "w", newline="") as f:\n writer = csv.writer(f)\n writer.writerow(["Response received", "Provider", "Model", "Prompt Tokens", "Generation Tokens", \n 
"Stream", "Temperature", "Logprobs", "Concurrency", "Time To First Token",\n "Latency Per Token", "Num Tokens", "Total Latency", "Num Requests", "Qps",\n "test_name", "duration", "exit_code"])\n writer.writerow([response_text, provider, model, prompt_tokens, generation_tokens,\n stream, temperature, logprobs, concurrency, time_to_first_token,\n latency_per_token, num_tokens, total_latency, num_requests, qps,\n test_name, duration, exit_code])\n\n def run_test(self, test_name, users, output_tokens):\n print(f"Running test: {test_name}")\n \n # Store max_tokens in base_config for later use in parse_output\n self.base_config[\'max-tokens\'] = output_tokens\n \n # Construct the command with additional parameters to ensure exact token count and proper test duration\n command = (f"locust -f {os.path.join(os.path.dirname(__file__), \'load_test.py\')} --headless "\n f"--host {self.base_config[\'host\']} "\n f"--provider {self.base_config[\'provider\']} "\n f"--model {self.base_config[\'model\']} "\n f"--api-key {self.base_config[\'api-key\']} "\n f"--logprobs {self.base_config[\'logprobs\']} "\n f"--run-time {self.base_config.get(\'run-time\', \'1m\')} "\n f"--users {users} "\n f"--spawn-rate {users} "\n f"--prompt-tokens {self.base_config.get(\'prompt-tokens\', 4046)} "\n f"--max-tokens {output_tokens} "\n f"--temperature {self.base_config.get(\'temperature\', 1.0)} "\n f"--expect-workers 1 " # Ensure proper worker initialization\n f"--stop-timeout 60 " # Increased timeout to match run-time\n f"--summary-file {self.results_dir}/results_{test_name}.csv "\n f"--no-stream " # Changed from --stream false to --no-stream\n f"--exit-code-on-error 1") # Exit with error code on failure\n print(f"Command: {command}")\n \n # Run the command and capture output\n start_time = time.time()\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n \n stdout_data = []\n stderr_data = []\n \n # Process output in real-time and ensure minimum runtime\n while True:\n # Read from stdout and stderr\n stdout_line = process.stdout.readline()\n stderr_line = process.stderr.readline()\n \n if stdout_line:\n print(stdout_line.strip())\n stdout_data.append(stdout_line)\n if stderr_line:\n print(stderr_line.strip())\n stderr_data.append(stderr_line)\n \n # Check if process has finished\n if process.poll() is not None:\n # Read any remaining output\n remaining_stdout, remaining_stderr = process.communicate()\n if remaining_stdout:\n stdout_data.append(remaining_stdout)\n if remaining_stderr:\n stderr_data.append(remaining_stderr)\n break\n \n # Check elapsed time\n elapsed_time = time.time() - start_time\n min_runtime = float(self.base_config.get(\'run-time\', \'30\').rstrip(\'s\'))\n \n if elapsed_time < min_runtime:\n time.sleep(0.1) # Small sleep to prevent CPU spinning\n continue\n \n duration = time.time() - start_time\n return_code = process.poll()\n \n # Ensure the test ran for the minimum duration\n if duration < float(self.base_config.get(\'run-time\', \'30\').rstrip(\'s\')):\n print(f"WARNING: Test duration {duration:.2f}s was shorter than requested {self.base_config.get(\'run-time\')}")\n return_code = 1\n \n # Parse metrics from output\n output = \'\'.join(stdout_data)\n metrics = self.parse_output(output)\n \n if metrics:\n metrics.update({\n \'test_name\': test_name,\n \'duration\': duration,\n \'exit_code\': return_code,\n \'Prompt Tokens\': self.base_config.get(\'prompt-tokens\', 4046),\n \'Concurrency\': users\n })\n self.results.append(metrics)\n \n # Write 
individual test report\n self.write_test_report(\n test_name=test_name,\n response_text=metrics.get(\'Response received\', \'\'),\n duration=duration,\n exit_code=return_code,\n prompt_tokens=metrics.get(\'Prompt Tokens\'),\n provider=metrics.get(\'Provider\'),\n model=metrics.get(\'Model\'),\n generation_tokens=metrics.get(\'Generation Tokens\'),\n stream=metrics.get(\'Stream\'),\n temperature=metrics.get(\'Temperature\'),\n logprobs=metrics.get(\'Logprobs\'),\n concurrency=metrics.get(\'Concurrency\'),\n time_to_first_token=metrics.get(\'Time To First Token\'),\n latency_per_token=metrics.get(\'Latency Per Token\'),\n num_tokens=metrics.get(\'Num Tokens\'),\n total_latency=metrics.get(\'Total Latency\'),\n num_requests=metrics.get(\'Num Requests\'),\n qps=metrics.get(\'Qps\')\n )\n\n def _parse_response(response_json):\n # First try usage.completion_tokens\n if \'usage\' in response_json and \'completion_tokens\' in response_json[\'usage\']:\n tokens = response_json[\'usage\'][\'completion_tokens\']\n # Then try generated_tokens_n\n elif \'generated_tokens_n\' in response_json:\n tokens = response_json[\'generated_tokens_n\']\n else:\n tokens = 0 # fallback if no token count available\n \n # Extract text from choices\n text = ""\n if \'choices\' in response_json and len(response_json[\'choices\']) > 0:\n if \'text\' in response_json[\'choices\'][0]:\n text = response_json[\'choices\'][0][\'text\']\n \n return {\n \'tokens\': tokens,\n \'text\': text,\n \'chars\': len(text) if text else 0\n }\n\n def process_completion_response(response, start_time):\n try:\n response_json = response.json()\n parsed = _parse_response(response_json)\n \n end_time = time.time()\n total_time = (end_time - start_time) * 1000 # Convert to milliseconds\n \n return {\n \'total_latency\': total_time,\n \'first_token_latency\': total_time, # Since we\'re not streaming, they\'re the same\n \'num_tokens\': parsed[\'tokens\'],\n \'text\': parsed[\'text\'],\n \'chars\': parsed[\'chars\']\n }\n \n except Exception as e:\n print(f"Error processing response: {e}")\n return None\n\n def parse_output(self, output):\n metrics = {}\n response_line = None\n \n for line in output.split(\'\\n\'):\n # Capture the response metrics line\n if line.startswith("Response received:"):\n response_line = line.strip()\n metrics[\'Response received\'] = response_line\n \n # Parse the response metrics\n if "total" in line and "first token" in line:\n try:\n # Extract total time\n total_time = float(line.split("total")[1].split("ms")[0].strip())\n metrics[\'Total Latency\'] = total_time\n \n # Extract first token time\n first_token = float(line.split("first token")[1].split("ms")[0].strip())\n metrics[\'Time To First Token\'] = first_token\n \n # Extract number of tokens\n tokens = int(line.split("tokens")[0].split(",")[-1].strip())\n metrics[\'Num Tokens\'] = tokens\n \n # Calculate latency per token\n if tokens > 0:\n latency_per_token = (total_time - first_token) / tokens\n metrics[\'Latency Per Token\'] = latency_per_token\n except (ValueError, IndexError) as e:\n print(f"Warning: Failed to parse metrics from line: {line}")\n print(f"Error: {str(e)}")\n \n # Parse other metrics from the stats table\n elif "POST" in line and "/v1/completions" in line:\n parts = [p.strip() for p in line.split("|") if p.strip()]\n if len(parts) >= 4:\n try:\n metrics[\'Num Requests\'] = int(parts[1].split()[0])\n qps = float(parts[-1].split()[0])\n metrics[\'Qps\'] = qps\n except (ValueError, IndexError) as e:\n print(f"Warning: Failed to parse POST 
metrics: {line}")\n print(f"Error: {str(e)}")\n \n # Parse provider and model info\n elif "Provider" in line and "using model" in line:\n try:\n parts = line.split("Provider")[1].split("using model")\n metrics[\'Provider\'] = parts[0].strip().strip("*")\n metrics[\'Model\'] = parts[1].strip().strip("*")\n except IndexError as e:\n print(f"Warning: Failed to parse provider/model info: {line}")\n print(f"Error: {str(e)}")\n \n # Add configuration metrics\n metrics[\'Stream\'] = False # Changed from hardcoded \'True\' to match actual config\n metrics[\'Temperature\'] = 1.0\n metrics[\'Logprobs\'] = 5\n metrics[\'Generation Tokens\'] = metrics.get(\'Num Tokens\', 50) # Default to max tokens if not found\n \n return metrics\n\n def generate_report(self):\n if not self.results:\n print("Warning: No results to generate report from")\n return\n\n timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")\n csv_filename = os.path.join(self.results_dir, f\'load_test_report_{timestamp}.csv\')\n json_filename = os.path.join(self.results_dir, f\'load_test_report_{timestamp}.json\')\n\n fieldnames = [\n \'Response received\', \'Provider\', \'Model\', \'Prompt Tokens\', \'Generation Tokens\',\n \'Stream\', \'Temperature\', \'Logprobs\', \'Concurrency\', \'Time To First Token\',\n \'Latency Per Token\', \'Num Tokens\', \'Total Latency\', \'Num Requests\', \'Qps\',\n \'test_name\', \'duration\', \'exit_code\'\n ]\n\n # Ensure all numeric fields are properly formatted\n for result in self.results:\n for field in [\'Time To First Token\', \'Latency Per Token\', \'Num Tokens\', \n \'Total Latency\', \'Num Requests\', \'Qps\']:\n if field in result and result[field] is not None:\n if isinstance(result[field], (int, float)):\n result[field] = f"{result[field]:.2f}" if isinstance(result[field], float) else str(result[field])\n\n with open(csv_filename, \'w\', newline=\'\') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for result in self.results:\n row = {k: (result.get(k, \'N/A\') if result.get(k) is not None else \'N/A\') \n for k in fieldnames}\n writer.writerow(row)\n\n with open(json_filename, \'w\') as f:\n json.dump(self.results, f, indent=2)\n\n print(f"Reports generated: {csv_filename} and {json_filename}")\n\ndef main():\n base_config = {\n "host": "https://dogfood.pilot.smallcloud.ai/",\n "provider": "openai",\n "model": "qwen2.5/coder/1.5b/base/vllm", # Updated model name to match the command\n "api-key": "d2aed710-a47b-4a3f-84f4-b02628c45e49",\n "logprobs": 5, # Set logprobs to an integer value\n "run-time": "60s", # Reduced runtime for faster testing\n "temperature": 1.0, # Add temperature parameter\n "top_p": 1.0, # Add top_p parameter to ensure no early stopping\n "frequency_penalty": 0.0, # Add frequency penalty\n "presence_penalty": 0.0, # Add presence penalty\n }\n\n # Create a results directory with timestamp\n timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")\n results_dir = os.path.join("results", f"run_{timestamp}")\n\n load_test = LLMLoadTest(base_config, results_dir)\n\n concurrent_users = [1]#, 2, 5, 10, 20, 50] # Test configurations\n max_tokens = [50]#1, 15, 30, 50]\n wait_time = 5 # Increased wait time between runs to ensure cleanup\n\n try:\n for u in concurrent_users:\n for o in max_tokens:\n test_name = f"test_u{u}_o{o}"\n print(f"\\n=== Starting test {test_name} ===")\n load_test.run_test(test_name, u, o)\n print(f"=== Test {test_name} completed ===")\n print(f"Waiting for {wait_time} seconds before the next run...")\n time.sleep(wait_time)\n\n 
load_test.generate_report()\n print("\\nAll tests completed successfully!")\n \n except KeyboardInterrupt:\n print("\\nTests interrupted by user")\n load_test.generate_report() # Generate report for completed tests\n except Exception as e:\n print(f"\\nError during test execution: {str(e)}")\n load_test.generate_report() # Generate report for completed tests\n\nif __name__ == "__main__":\n main()\n', - line1: 1, - line2: 356, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "Let me also check the load_test.py file to understand how the actual load testing is performed:", - ftm_tool_calls: [ - { - id: "toolu_01WXJiwA9oLdRRAS4SADKvmM", - function: { - arguments: - '{"paths": "/home/fupfv/git/benchmark1_0701/src/load_test.py", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01WXJiwA9oLdRRAS4SADKvmM", - ftm_content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/src/load_test.py\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "benchmark1_0701/src/load_test.py", - file_content: - 'import abc\nimport argparse\nimport csv\nfrom dataclasses import dataclass\nfrom functools import partial\nimport os\nimport random\nimport sys\nimport traceback\nfrom typing import Optional\nfrom locust import HttpUser, task, events, constant_pacing\nimport copy\nimport json\nimport time\nimport orjson\nimport threading\nimport logging\nimport requests\n\n# Set up logging\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\'%(asctime)s - %(levelname)s - %(message)s\',\n handlers=[\n logging.StreamHandler(),\n logging.FileHandler(\'load_test.log\')\n ]\n)\n\ndef test_single_request():\n url = "https://dogfood.pilot.smallcloud.ai/v1/completions"\n headers = {\n "Content-Type": "application/json",\n "Authorization": "Bearer d2aed710-a47b-4a3f-84f4-b02628c45e49"\n }\n data = {\n "model": "starcoder2/3b/vllm",\n "prompt": "print", # Shorter prompt\n "max_tokens": 15,\n "temperature": 1.0,\n "stream": False,\n "timeout": 10\n }\n \n logging.info("Sending request with data: %s", json.dumps(data, indent=2))\n try:\n response = requests.post(url, headers=headers, json=data, timeout=10)\n logging.info(f"Response status: {response.status_code}")\n logging.info(f"Response headers: {dict(response.headers)}")\n \n if response.status_code == 200:\n resp_json = response.json()\n logging.info("Raw response: %s", json.dumps(resp_json, indent=2))\n \n # Check token counts\n usage = resp_json.get("usage", {})\n generated_tokens = resp_json.get("generated_tokens_n")\n \n logging.info("\\nToken counts:")\n logging.info(f"usage.completion_tokens: {usage.get(\'completion_tokens\')}")\n logging.info(f"generated_tokens_n: {generated_tokens}")\n \n # Check text output\n choices = resp_json.get("choices", [])\n if choices:\n text = choices[0].get("text", "")\n logging.info(f"\\nGenerated text ({len(text)} chars):")\n logging.info(text)\n else:\n logging.error("Error response: %s", response.text)\n except requests.exceptions.Timeout:\n logging.error("Request timed out after 10 seconds")\n except Exception as e:\n logging.error("Error during request: %s", str(e))\n\nif __name__ == "__main__":\n test_single_request()\n\ntry:\n import locust_plugins\nexcept ImportError:\n print("locust-plugins is not installed, Grafana won\'t work")\n\n\ndef add_custom_metric(name, value, length_value=0):\n events.request.fire(\n request_type="METRIC",\n name=name,\n response_time=value,\n 
response_length=length_value,\n exception=None,\n context=None,\n )\n\n\nprompt_prefix = "Pad " # exactly one token\n# "Lengthy" prompt borrowed from nat.dev\nprompt = """Generate a Django application with Authentication, JWT, Tests, DB support. Show docker-compose for python and postgres. Show the complete code for every file!"""\nprompt_tokens = 35 # from Llama tokenizer tool (so we don\'t import it here)\nprompt_random_tokens = 10\n\n\nclass FixedQPSPacer:\n _instance = None\n _lock = threading.Lock()\n\n def __init__(self, qps, distribution):\n self.qps = qps\n self.distribution = distribution\n\n # It\'s kind of thread safe thanks to GIL as the only state is `t` - good enough for a loadtest\n def gen():\n t = time.time()\n mean_wait = 1 / self.qps\n while True:\n if self.distribution == "exponential":\n wait = random.expovariate(1 / mean_wait)\n elif self.distribution == "uniform":\n wait = random.uniform(0, 2 * mean_wait)\n elif self.distribution == "constant":\n wait = mean_wait\n else:\n print("Unknown distribution {self.distribution}")\n os._exit(1)\n t += wait\n yield t\n\n self.iterator = gen()\n\n @classmethod\n def instance(cls, qps, distribution):\n with cls._lock:\n if cls._instance is None:\n cls._instance = cls(qps, distribution)\n else:\n assert cls._instance.qps == qps\n assert cls._instance.distribution == distribution\n return cls._instance\n\n def wait_time_till_next(self):\n with self._lock:\n t = next(self.iterator)\n now = time.time()\n if now > t:\n print(\n f"WARNING: not enough locust users to keep up with the desired QPS. Either the number of locust users is too low or the server is overloaded. Delay: {now-t:.3f}s"\n )\n return 0\n return t - now\n\n\nclass LengthSampler:\n def __init__(self, distribution: str, mean: int, cap: Optional[int], alpha: float):\n self.distribution = distribution\n self.mean = mean\n self.cap = cap\n self.alpha = alpha\n\n if self.distribution == "exponential":\n self.sample_func = lambda: int(random.expovariate(1 / self.mean))\n elif self.distribution == "uniform":\n mx = self.mean + int(self.alpha * self.mean)\n if self.cap is not None:\n mx = min(mx, self.cap)\n self.sample_func = lambda: random.randint(\n max(1, self.mean - int(self.alpha * self.mean)), mx\n )\n elif self.distribution == "constant":\n self.sample_func = lambda: self.mean\n elif self.distribution == "normal":\n self.sample_func = lambda: int(\n random.gauss(self.mean, self.mean * self.alpha)\n )\n else:\n raise ValueError(f"Unknown distribution {self.distribution}")\n\n def sample(self) -> int:\n for _ in range(1000):\n sample = self.sample_func()\n if sample <= 0:\n continue\n if self.cap is not None and sample > self.cap:\n continue\n return sample\n else:\n raise ValueError(\n "Can\'t sample a value after 1000 attempts, check distribution parameters"\n )\n\n def __str__(self):\n r = int(self.mean * self.alpha)\n if self.distribution == "constant":\n s = str(self.mean)\n elif self.distribution == "uniform":\n s = f"uniform({self.mean} +/- {r})"\n elif self.distribution == "normal":\n s = f"normal({self.mean}, {r})"\n elif self.distribution == "exponential":\n s = f"exponential({self.mean})"\n else:\n assert False\n if self.cap is not None:\n s += f" capped at {self.cap}"\n return s\n\n\nclass InitTracker:\n lock = threading.Lock()\n users = None\n first_request_done = 0\n logging_params = None\n environment = None\n tokenizer = None\n\n @classmethod\n def notify_init(cls, environment, logging_params):\n with cls.lock:\n if cls.environment is None:\n 
cls.environment = environment\n if cls.logging_params is None:\n cls.logging_params = logging_params\n else:\n assert (\n cls.logging_params == logging_params\n ), f"Inconsistent settings between workers: {cls.logging_params} != {logging_params}"\n\n @classmethod\n def notify_first_request(cls):\n with cls.lock:\n if (\n cls.environment.parsed_options.qps is not None\n and cls.first_request_done == 0\n ):\n # if in QPS mode, reset after first successful request comes back\n cls.reset_stats()\n cls.first_request_done += 1\n if (\n cls.environment.parsed_options.qps is not None\n and cls.first_request_done == 0\n and cls.users == cls.first_request_done\n ):\n # if in fixed load mode, reset after all users issued one request (we\'re in a steady state)\n cls.reset_stats()\n\n @classmethod\n def notify_spawning_complete(cls, user_count):\n with cls.lock:\n cls.users = user_count\n if cls.users == cls.first_request_done:\n cls.reset_stats()\n\n @classmethod\n def reset_stats(cls):\n assert cls.environment.runner, "only local mode is supported"\n print("Resetting stats after traffic reach a steady state")\n cls.environment.events.reset_stats.fire()\n cls.environment.runner.stats.reset_all()\n\n @classmethod\n def load_tokenizer(cls, dir):\n if not dir:\n return None\n with cls.lock:\n if cls.tokenizer:\n return cls.tokenizer\n import transformers\n\n cls.tokenizer = transformers.AutoTokenizer.from_pretrained(dir)\n cls.tokenizer.add_bos_token = False\n cls.tokenizer.add_eos_token = False\n return cls.tokenizer\n\n\nevents.spawning_complete.add_listener(InitTracker.notify_spawning_complete)\n\n\n@dataclass\nclass ChunkMetadata:\n text: str\n logprob_tokens: Optional[int]\n usage_tokens: Optional[int]\n prompt_usage_tokens: Optional[int]\n max_tokens: Optional[int] = None\n should_retry: bool = False\n\n\nclass BaseProvider(abc.ABC):\n DEFAULT_MODEL_NAME = None\n\n def __init__(self, model, parsed_options):\n self.model = model\n self.parsed_options = parsed_options\n\n @abc.abstractmethod\n def get_url(self): ...\n\n @abc.abstractmethod\n def format_payload(self, prompt, max_tokens, images): ...\n\n @abc.abstractmethod\n def parse_output_json(self, json, prompt): ...\n\n\nclass OpenAIProvider(BaseProvider):\n def get_url(self):\n if self.parsed_options.chat:\n return "v1/chat/completions"\n else:\n #return ""\n return "v1/completions"\n\n def format_payload(self, prompt, max_tokens, images):\n data = {\n "model": self.model,\n "max_tokens": max_tokens,\n "stream": self.parsed_options.stream,\n "temperature": self.parsed_options.temperature,\n # Add strict token control\n "min_tokens": max_tokens, # Force minimum tokens\n "ignore_eos": True, # Don\'t stop on EOS token\n "stop": None, # Disable stop sequences\n "best_of": 1, # Disable multiple sequences\n "use_beam_search": False, # Disable beam search\n "top_p": 1.0, # Disable nucleus sampling\n "top_k": 0, # Disable top-k sampling\n "presence_penalty": 0.0, # No presence penalty\n "frequency_penalty": 0.0, # No frequency penalty\n }\n if self.parsed_options.chat:\n if images is None:\n data["messages"] = [{"role": "user", "content": prompt}]\n else:\n image_urls = []\n for image in images:\n image_urls.append(\n {"type": "image_url", "image_url": {"url": image}}\n )\n data["messages"] = [\n {\n "role": "user",\n "content": [{"type": "text", "text": prompt}, *image_urls],\n }\n ]\n else:\n data["prompt"] = prompt\n if images is not None:\n data["images"] = images\n if self.parsed_options.logprobs is not None:\n data["logprobs"] = 
self.parsed_options.logprobs\n return data\n\n def parse_output_json(self, data, prompt):\n # Check for error response\n if data.get("status") == "error":\n error_msg = data.get(\'human_readable_message\', \'unknown error\')\n print(f"API Error: {error_msg}")\n \n # For timeout errors, return a special metadata\n if error_msg == "timeout":\n return ChunkMetadata(\n text="[TIMEOUT]",\n logprob_tokens=None,\n usage_tokens=self.parsed_options.max_tokens, # Use requested token count\n prompt_usage_tokens=None,\n max_tokens=self.parsed_options.max_tokens\n )\n \n # For other errors\n return ChunkMetadata(\n text="[ERROR]",\n logprob_tokens=None,\n usage_tokens=0,\n prompt_usage_tokens=None,\n max_tokens=None\n )\n \n usage = data.get("usage", None)\n generated_tokens = data.get("generated_tokens_n", None)\n\n # Handle empty choices array\n choices = data.get("choices", [])\n if not choices:\n # Return empty text with usage info if available\n return ChunkMetadata(\n text="",\n logprob_tokens=None,\n usage_tokens=generated_tokens if generated_tokens is not None else (usage["completion_tokens"] if usage else self.parsed_options.max_tokens),\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=data.get("max_tokens", self.parsed_options.max_tokens)\n )\n\n choice = choices[0]\n if self.parsed_options.chat:\n if self.parsed_options.stream:\n text = choice["delta"].get("content", "")\n else:\n text = choice["message"]["content"]\n else:\n text = choice.get("text", "")\n\n logprobs = choice.get("logprobs", None)\n tokens = generated_tokens if generated_tokens is not None else (\n usage["completion_tokens"] if usage else self.parsed_options.max_tokens\n )\n\n # Validate token count matches request\n if tokens != self.parsed_options.max_tokens:\n print(f"WARNING: Generated tokens {tokens} != requested {self.parsed_options.max_tokens}")\n\n return ChunkMetadata(\n text=text,\n logprob_tokens=len(logprobs["tokens"]) if logprobs else None,\n usage_tokens=tokens,\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=data.get("max_tokens", self.parsed_options.max_tokens)\n )\n\n\nclass FireworksProvider(OpenAIProvider):\n def format_payload(self, prompt, max_tokens, images):\n data = super().format_payload(prompt, max_tokens, images)\n data["min_tokens"] = max_tokens\n data["prompt_cache_max_len"] = 0\n return data\n\n\nclass VllmProvider(OpenAIProvider):\n def format_payload(self, prompt, max_tokens, images):\n data = {\n "model": self.model,\n "prompt": prompt,\n "max_tokens": max_tokens,\n "stream": self.parsed_options.stream,\n "temperature": self.parsed_options.temperature,\n # VLLM specific parameters for exact token generation\n "ignore_eos": True,\n "min_tokens": max_tokens,\n "stop": [], # Empty list instead of None\n "best_of": 1,\n "use_beam_search": False,\n "top_p": 1.0,\n "top_k": -1, # -1 instead of 0 for VLLM\n "presence_penalty": 0.0,\n "frequency_penalty": 0.0\n }\n if self.parsed_options.logprobs is not None:\n data["logprobs"] = self.parsed_options.logprobs\n if images is not None:\n data["images"] = images\n return data\n\n def parse_output_json(self, data, prompt):\n # Handle error responses\n if data.get("status") == "error":\n error_msg = data.get(\'human_readable_message\', \'unknown error\')\n print(f"API Error: {error_msg}")\n return ChunkMetadata(\n text="[ERROR]",\n logprob_tokens=None,\n usage_tokens=0,\n prompt_usage_tokens=None,\n max_tokens=None,\n should_retry=False\n )\n \n usage = data.get("usage", 
None)\n generated_tokens = data.get("generated_tokens_n", None)\n choices = data.get("choices", [])\n \n if not choices:\n return ChunkMetadata(\n text="",\n logprob_tokens=None,\n usage_tokens=generated_tokens if generated_tokens is not None else (usage["completion_tokens"] if usage else self.parsed_options.max_tokens),\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=self.parsed_options.max_tokens,\n should_retry=False\n )\n\n choice = choices[0]\n text = choice.get("text", "")\n logprobs = choice.get("logprobs", None)\n tokens = generated_tokens if generated_tokens is not None else (\n usage["completion_tokens"] if usage else self.parsed_options.max_tokens\n )\n\n # Log token generation details\n print(f"Generated tokens: {tokens}, Requested: {self.parsed_options.max_tokens}")\n \n return ChunkMetadata(\n text=text,\n logprob_tokens=len(logprobs["tokens"]) if logprobs else None,\n usage_tokens=tokens,\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=self.parsed_options.max_tokens,\n should_retry=False\n )\n # Force exact token generation\n data.update({\n "ignore_eos": True,\n "max_tokens": max_tokens,\n "min_tokens": max_tokens,\n "stop": None,\n "best_of": 1,\n "use_beam_search": False,\n "top_p": 1.0, # Disable nucleus sampling\n "top_k": 0, # Disable top-k sampling\n "presence_penalty": 0.0,\n "frequency_penalty": 0.0,\n "temperature": 1.0, # Use standard temperature\n "early_stopping": False\n })\n return data\n\n\nclass TogetherProvider(OpenAIProvider):\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n return "/"\n\n def format_payload(self, prompt, max_tokens, images):\n data = super().format_payload(prompt, max_tokens, images)\n data["ignore_eos"] = True\n data["stream_tokens"] = data.pop("stream")\n return data\n\n def parse_output_json(self, data, prompt):\n if not self.parsed_options.stream:\n data = data["output"]\n return super().parse_output_json(data, prompt)\n\n\nclass TritonInferProvider(BaseProvider):\n DEFAULT_MODEL_NAME = "ensemble"\n\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n assert not self.parsed_options.stream, "Stream is not supported"\n return f"/v2/models/{self.model}/infer"\n\n def format_payload(self, prompt, max_tokens, images):\n assert images is None, "images are not supported"\n # matching latest TRT-LLM example, your model configuration might be different\n data = {\n "inputs": [\n {\n "name": "text_input",\n "datatype": "BYTES",\n "shape": [1, 1],\n "data": [[prompt]],\n },\n {\n "name": "max_tokens",\n "datatype": "UINT32",\n "shape": [1, 1],\n "data": [[max_tokens]],\n },\n {\n "name": "bad_words",\n "datatype": "BYTES",\n "shape": [1, 1],\n "data": [[""]],\n },\n {\n "name": "stop_words",\n "datatype": "BYTES",\n "shape": [1, 1],\n "data": [[""]],\n },\n {\n "name": "temperature",\n "datatype": "FP32",\n "shape": [1, 1],\n "data": [[self.parsed_options.temperature]],\n },\n ]\n }\n assert self.parsed_options.logprobs is None, "logprobs are not supported"\n return data\n\n def parse_output_json(self, data, prompt):\n for output in data["outputs"]:\n if output["name"] == "text_output":\n assert output["datatype"] == "BYTES"\n assert output["shape"] == [1]\n text = output["data"][0]\n # Triton returns the original prompt in the output, cut it off\n text = text.removeprefix(" ")\n if text.startswith(prompt):\n # HF tokenizers get confused by the leading space\n text = text[len(prompt) 
:].removeprefix(" ")\n else:\n print("WARNING: prompt not found in the output")\n return ChunkMetadata(\n text=text,\n logprob_tokens=None,\n usage_tokens=None,\n prompt_usage_tokens=None,\n )\n raise ValueError("text_output not found in the response")\n\n\nclass TritonGenerateProvider(BaseProvider):\n DEFAULT_MODEL_NAME = "ensemble"\n\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n stream_suffix = "_stream" if self.parsed_options.stream else ""\n return f"/v2/models/{self.model}/generate{stream_suffix}"\n\n def format_payload(self, prompt, max_tokens, images):\n assert images is None, "images are not supported"\n data = {\n "text_input": prompt,\n "max_tokens": max_tokens,\n "stream": self.parsed_options.stream,\n "temperature": self.parsed_options.temperature,\n # for whatever reason these has to be provided\n "bad_words": "",\n "stop_words": "",\n }\n assert self.parsed_options.logprobs is None, "logprobs are not supported"\n return data\n\n def parse_output_json(self, data, prompt):\n text = data["text_output"]\n if not self.parsed_options.stream:\n # Triton returns the original prompt in the output, cut it off\n text = text.removeprefix(" ")\n if text.startswith(prompt):\n # HF tokenizers get confused by the leading space\n text = text[len(prompt) :].removeprefix(" ")\n else:\n print("WARNING: prompt not found in the output")\n return ChunkMetadata(\n text=text,\n logprob_tokens=None,\n usage_tokens=None,\n prompt_usage_tokens=None,\n )\n\n\nclass TgiProvider(BaseProvider):\n DEFAULT_MODEL_NAME = ""\n\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n stream_suffix = "_stream" if self.parsed_options.stream else ""\n return f"/generate{stream_suffix}"\n\n def format_payload(self, prompt, max_tokens, images):\n assert images is None, "images are not supported"\n data = {\n "inputs": prompt,\n "parameters": {\n "max_new_tokens": max_tokens,\n "temperature": self.parsed_options.temperature,\n "top_n_tokens": self.parsed_options.logprobs,\n "details": self.parsed_options.logprobs is not None,\n },\n }\n return data\n\n def parse_output_json(self, data, prompt):\n if "token" in data:\n # streaming chunk\n return ChunkMetadata(\n text=data["token"]["text"],\n logprob_tokens=1,\n usage_tokens=None,\n prompt_usage_tokens=None,\n )\n else:\n # non-streaming response\n return ChunkMetadata(\n text=data["generated_text"],\n logprob_tokens=(\n len(data["details"]["tokens"]) if "details" in data else None\n ),\n usage_tokens=(\n data["details"]["generated_tokens"] if "details" in data else None\n ),\n prompt_usage_tokens=None,\n )\n\n\nPROVIDER_CLASS_MAP = {\n "fireworks": FireworksProvider,\n "vllm": VllmProvider,\n "openai": OpenAIProvider,\n "anyscale": OpenAIProvider,\n "together": TogetherProvider,\n "triton-infer": TritonInferProvider,\n "triton-generate": TritonGenerateProvider,\n "tgi": TgiProvider,\n}\n\n\ndef _load_curl_like_data(text):\n """\n Either use the passed string or load from a file if the string is `@filename`\n """\n if text.startswith("@"):\n try:\n if text.endswith(".jsonl"):\n with open(text[1:], "r") as f:\n return [json.loads(line) for line in f]\n else:\n with open(text[1:], "r") as f:\n return f.read()\n except Exception as e:\n raise ValueError(f"Failed to read file {text[1:]}") from e\n else:\n return text\n\n\nclass LLMUser(HttpUser):\n # no wait time, so every user creates a continuous load, sending requests as quickly as possible\n\n def on_start(self):\n try:\n self._on_start()\n except 
Exception as e:\n print(f"Failed to initialize: {repr(e)}")\n print(traceback.format_exc())\n sys.exit(1)\n\n def _guess_provider(self):\n self.model = self.environment.parsed_options.model\n self.provider = self.environment.parsed_options.provider\n # guess based on URL\n if self.provider is None:\n if "fireworks.ai" in self.host:\n self.provider = "fireworks"\n elif "together" in self.host:\n self.provider = "together"\n elif "openai" in self.host:\n self.provider = "openai"\n elif "anyscale" in self.host:\n self.provider = "anyscale"\n\n if (\n self.model is None\n and self.provider is not None\n and PROVIDER_CLASS_MAP[self.provider].DEFAULT_MODEL_NAME is not None\n ):\n self.model = PROVIDER_CLASS_MAP[self.provider].DEFAULT_MODEL_NAME\n\n if self.model and self.provider:\n return\n\n # vllm doesn\'t support /model/ endpoint, so iterate over all models\n try:\n resp = self.client.get("/v1/models")\n resp.raise_for_status()\n resp = resp.json()\n except Exception as e:\n raise ValueError(\n "Argument --model or --provider was not specified and /v1/models failed"\n ) from e\n\n models = resp["data"]\n assert len(models) > 0, "No models found in /v1/models"\n owned_by = None\n # pick the first model\n for m in models:\n if self.model is None or m["id"] == self.model:\n self.model = m["id"]\n owned_by = m["owned_by"]\n break\n if self.provider is None:\n if not owned_by:\n raise ValueError(\n f"Model {self.model} not found in /v1/models. Specify --provider explicitly"\n )\n if owned_by in ["vllm", "fireworks"]:\n self.provider = owned_by\n else:\n raise ValueError(\n f"Can\'t detect provider, specify it explicitly with --provider, owned_by={owned_by}"\n )\n\n def _on_start(self):\n self.client.headers["Content-Type"] = "application/json"\n if self.environment.parsed_options.api_key:\n self.client.headers["Authorization"] = (\n "Bearer " + self.environment.parsed_options.api_key\n )\n self._guess_provider()\n print(f" Provider {self.provider} using model {self.model} ".center(80, "*"))\n self.provider_formatter = PROVIDER_CLASS_MAP[self.provider](\n self.model, self.environment.parsed_options\n )\n\n self.stream = self.environment.parsed_options.stream\n prompt_chars = self.environment.parsed_options.prompt_chars\n if self.environment.parsed_options.prompt_text:\n self.input = _load_curl_like_data(\n self.environment.parsed_options.prompt_text\n )\n elif prompt_chars:\n self.input = (\n prompt_prefix * (prompt_chars // len(prompt_prefix) + 1) + prompt\n )[:prompt_chars]\n else:\n min_prompt_len = (\n prompt_tokens\n + prompt_random_tokens\n * self.environment.parsed_options.prompt_randomize\n )\n assert (\n self.environment.parsed_options.prompt_tokens >= min_prompt_len\n ), f"Minimal prompt length is {min_prompt_len}"\n self.input = (\n prompt_prefix\n * (self.environment.parsed_options.prompt_tokens - min_prompt_len)\n + prompt\n )\n self.max_tokens_sampler = LengthSampler(\n distribution=self.environment.parsed_options.max_tokens_distribution,\n mean=self.environment.parsed_options.max_tokens,\n cap=self.environment.parsed_options.max_tokens_cap,\n alpha=self.environment.parsed_options.max_tokens_range,\n )\n self.temperature = self.environment.parsed_options.temperature\n\n logging_params = {\n # TODO: add some server info with git version\n "provider": self.provider,\n "model": self.model,\n "prompt_tokens": self.environment.parsed_options.prompt_tokens, # might be overwritten based on metric\n "generation_tokens": str(self.max_tokens_sampler),\n "stream": self.stream,\n "temperature": 
self.temperature,\n "logprobs": self.environment.parsed_options.logprobs,\n }\n InitTracker.notify_init(self.environment, logging_params)\n\n self.tokenizer = InitTracker.load_tokenizer(\n self.environment.parsed_options.tokenizer\n )\n if self.tokenizer:\n self.prompt_tokenizer_tokens = len(\n self.tokenizer.encode(self._get_input()[0])\n )\n else:\n self.prompt_tokenizer_tokens = None\n\n if self.environment.parsed_options.qps is not None:\n if self.environment.parsed_options.burst:\n raise ValueError("Burst and QPS modes are mutually exclusive")\n pacer = FixedQPSPacer.instance(\n self.environment.parsed_options.qps,\n self.environment.parsed_options.qps_distribution,\n )\n # it will be called by Locust after each task\n self.wait_time = pacer.wait_time_till_next\n self.wait()\n elif self.environment.parsed_options.burst:\n self.wait_time = partial(\n constant_pacing(self.environment.parsed_options.burst), self\n )\n else:\n # introduce initial delay to avoid all users hitting the service at the same time\n time.sleep(random.random())\n\n self.first_done = False\n\n def _get_input(self):\n def _maybe_randomize(prompt):\n if not self.environment.parsed_options.prompt_randomize:\n return prompt\n # single letters are single tokens\n return (\n " ".join(\n chr(ord("a") + random.randint(0, 25))\n for _ in range(prompt_random_tokens)\n )\n + " "\n + prompt\n )\n\n if isinstance(self.input, str):\n return _maybe_randomize(self.input), None\n else:\n item = self.input[random.randint(0, len(self.input) - 1)]\n assert "prompt" in item\n return _maybe_randomize(item["prompt"]), item.get("images", None)\n\n @task\n def generate_text(self):\n max_tokens = self.max_tokens_sampler.sample()\n prompt, images = self._get_input()\n data = self.provider_formatter.format_payload(prompt, max_tokens, images)\n t_start = time.perf_counter()\n\n logging.debug("Sending request with data: %s", json.dumps(data, indent=2))\n \n with self.client.post(\n self.provider_formatter.get_url(),\n data=json.dumps(data),\n stream=True,\n catch_response=True,\n ) as response:\n logging.debug("Got response status: %d", response.status_code)\n logging.debug("Response headers: %s", dict(response.headers))\n \n dur_chunks = []\n combined_text = ""\n done = False\n prompt_usage_tokens = self.prompt_tokenizer_tokens\n total_usage_tokens = None\n total_logprob_tokens = None\n try:\n response.raise_for_status()\n except Exception as e:\n logging.error("Response error text: %s", response.text)\n raise RuntimeError(f"Error in response: {response.text}") from e\n t_first_token = None\n for chunk in response.iter_lines(delimiter=b"\\n\\n"):\n if t_first_token is None:\n t_first_token = time.perf_counter()\n t_prev = time.perf_counter()\n\n if len(chunk) == 0:\n continue # come providers send empty lines between data chunks\n if done:\n if chunk != b"data: [DONE]":\n print(f"WARNING: Received more chunks after [DONE]: {chunk}")\n try:\n now = time.perf_counter()\n dur_chunks.append(now - t_prev)\n t_prev = now\n if self.stream:\n assert chunk.startswith(\n b"data:"\n ), f"Unexpected chunk not starting with \'data\': {chunk}"\n chunk = chunk[len(b"data:") :]\n if chunk.strip() == b"[DONE]":\n done = True\n continue\n logging.debug("Processing chunk: %s", chunk.decode())\n data = orjson.loads(chunk)\n logging.debug("Parsed chunk data: %s", json.dumps(data, indent=2))\n out = self.provider_formatter.parse_output_json(data, prompt)\n if out.usage_tokens:\n total_usage_tokens = (\n total_usage_tokens or 0\n ) + out.usage_tokens\n 
logging.debug("Updated total_usage_tokens: %d", total_usage_tokens)\n if out.prompt_usage_tokens:\n prompt_usage_tokens = out.prompt_usage_tokens\n logging.debug("Updated prompt_usage_tokens: %d", prompt_usage_tokens)\n combined_text += out.text\n\n if out.logprob_tokens:\n total_logprob_tokens = (\n total_logprob_tokens or 0\n ) + out.logprob_tokens\n logging.debug("Updated total_logprob_tokens: %d", total_logprob_tokens)\n except Exception as e:\n logging.error("Failed to parse response: %s with error %s", chunk, repr(e))\n response.failure(e)\n return\n assert t_first_token is not None, "empty response received"\n if (\n (total_logprob_tokens is not None)\n and (total_usage_tokens is not None)\n and total_logprob_tokens != total_usage_tokens\n ):\n print(\n f"WARNING: usage_tokens {total_usage_tokens} != logprob_tokens {total_logprob_tokens}"\n )\n if total_logprob_tokens is not None:\n num_tokens = total_logprob_tokens\n else:\n num_tokens = total_usage_tokens\n if self.tokenizer:\n num_tokenizer_tokens = len(self.tokenizer.encode(combined_text))\n if num_tokens is None:\n num_tokens = num_tokenizer_tokens\n elif num_tokens != num_tokenizer_tokens:\n print(\n f"WARNING: tokenizer token count {num_tokenizer_tokens} != {num_tokens} received from server"\n )\n num_tokens = num_tokens or 0\n num_chars = len(combined_text)\n now = time.perf_counter()\n dur_total = now - t_start\n dur_generation = now - t_first_token\n dur_first_token = t_first_token - t_start\n print(\n f"Response received: total {dur_total*1000:.2f} ms, first token {dur_first_token*1000:.2f} ms, {num_chars} chars, {num_tokens} tokens"\n )\n if self.environment.parsed_options.show_response:\n print("---")\n print(combined_text)\n print("---")\n if num_chars:\n add_custom_metric(\n "latency_per_char", dur_generation / num_chars * 1000, num_chars\n )\n if self.stream:\n add_custom_metric("time_to_first_token", dur_first_token * 1000)\n add_custom_metric("total_latency", dur_total * 1000)\n if num_tokens:\n if num_tokens != max_tokens:\n print(\n f"WARNING: wrong number of tokens: {num_tokens}, expected {max_tokens}"\n )\n add_custom_metric("num_tokens", num_tokens)\n add_custom_metric("max_tokens", max_tokens) # Add max_tokens metric\n add_custom_metric(\n "latency_per_token", dur_generation / num_tokens * 1000, num_tokens\n )\n add_custom_metric(\n "overall_latency_per_token",\n dur_total / num_tokens * 1000,\n num_tokens,\n )\n if (\n prompt_usage_tokens is not None\n and self.prompt_tokenizer_tokens is not None\n and prompt_usage_tokens != self.prompt_tokenizer_tokens\n ):\n print(\n f"WARNING: prompt usage tokens {prompt_usage_tokens} != {self.prompt_tokenizer_tokens} derived from local tokenizer"\n )\n prompt_tokens = prompt_usage_tokens or self.prompt_tokenizer_tokens\n if prompt_tokens:\n add_custom_metric("prompt_tokens", prompt_tokens)\n\n if not self.first_done:\n self.first_done = True\n InitTracker.notify_first_request()\n\n\n@events.init_command_line_parser.add_listener\ndef init_parser(parser):\n parser.add_argument(\n "--provider",\n choices=list(PROVIDER_CLASS_MAP.keys()),\n type=str,\n help="Which flavor of API to use. If not specified, we\'ll try to guess based on the URL and /v1/models output",\n )\n parser.add_argument(\n "-m",\n "--model",\n env_var="MODEL",\n type=str,\n help="The model to use for generating text. 
If not specified we will pick the first model from the service as returned by /v1/models",\n )\n parser.add_argument(\n "--chat",\n action=argparse.BooleanOptionalAction,\n default=False,\n help="Use /v1/chat/completions API",\n )\n parser.add_argument(\n "-p",\n "--prompt-tokens",\n env_var="PROMPT_TOKENS",\n type=int,\n default=512,\n help="Length of the prompt in tokens. Default 512",\n )\n parser.add_argument(\n "--prompt-chars",\n env_var="PROMPT_CHARS",\n type=int,\n help="Length of the prompt in characters.",\n )\n parser.add_argument(\n "--prompt-text",\n env_var="PROMPT_TEXT",\n type=str,\n help="Prompt text to use instead of generating one. It can be a file reference starting with an ampersand, e.g. `@prompt.txt`",\n )\n parser.add_argument(\n "--prompt-randomize",\n action=argparse.BooleanOptionalAction,\n default=False,\n help="Include a few random numbers in the generated prompt to avoid caching",\n )\n parser.add_argument(\n "-o",\n "--max-tokens",\n env_var="MAX_TOKENS",\n type=int,\n default=64,\n help="Max number of tokens to generate. If --max-tokens-distribution is non-constant this is going to be the mean. Defaults to 64",\n )\n parser.add_argument(\n "--max-tokens-cap",\n env_var="MAX_TOKENS_CAP",\n type=int,\n help="If --max-tokens-distribution is non-constant, this truncates the distribition at the specified limit",\n )\n parser.add_argument(\n "--max-tokens-distribution",\n env_var="MAX_TOKENS_DISTRIBUTION",\n type=str,\n choices=["constant", "uniform", "exponential", "normal"],\n default="constant",\n help="How to sample `max-tokens` on each request",\n )\n parser.add_argument(\n "--max-tokens-range",\n env_var="MAX_TOKENS_RANGE",\n type=float,\n default=0.3,\n help="Specifies the width of the distribution. Specified value `alpha` is relative to `max-tokens`. For uniform distribution we\'d sample from [max_tokens - max_tokens * alpha, max_tokens + max_tokens * alpha]. For normal distribution we\'d sample from `N(max_tokens, max_tokens * alpha)`. Defaults to 0.3",\n )\n parser.add_argument(\n "--stream",\n dest="stream",\n action=argparse.BooleanOptionalAction,\n default=True,\n help="Use the streaming API",\n )\n parser.add_argument(\n "-k",\n "--api-key",\n env_var="API_KEY",\n help="Auth for the API",\n )\n parser.add_argument(\n "--temperature",\n env_var="TEMPERATURE",\n type=float,\n default=1.0,\n help="Temperature parameter for the API",\n )\n parser.add_argument(\n "--logprobs",\n type=int,\n default=None,\n help="Whether to ask for logprobs, it makes things slower for some providers but is necessary for token count in streaming (unless it\'s Fireworks API that returns usage in streaming mode)",\n )\n parser.add_argument(\n "--summary-file",\n type=str,\n help="Append the line with the summary to the specified CSV file. Useful for generating a spreadsheet with perf sweep results. If the file doesn\'t exist, writes out the header first",\n )\n parser.add_argument(\n "--qps",\n type=float,\n default=None,\n help="Enabled \'fixed QPS\' mode where requests are issues at the specified rate regardless of how long the processing takes. In this case --users and --spawn-rate need to be set to a sufficiently high value (e.g. 100)",\n )\n parser.add_argument(\n "--qps-distribution",\n type=str,\n choices=["constant", "uniform", "exponential"],\n default="constant",\n help="Must be used with --qps. Specifies how to space out requests: equally (\'constant\') or by sampling wait times from a distribution (\'uniform\' or \'exponential\'). 
Expected QPS is going to match --qps",\n )\n parser.add_argument(\n "--burst",\n type=float,\n default=None,\n help="Makes requests to arrive in bursts every specified number of seconds. Note that burst duration has to be longer than maximum time of the response. Size of the burst is controlled by --users. The spawn rate -r is best set to a high value",\n )\n parser.add_argument(\n "--tokenizer",\n type=str,\n help="Specify HF tokenizer to use for validating the output of the model. It\'s optional, we\'re going to rely on \'usage\' or \'logprobs\' field to get token count information",\n )\n parser.add_argument(\n "--show-response",\n action=argparse.BooleanOptionalAction,\n default=False,\n help="Print the result of each generation",\n )\n\n\n@events.quitting.add_listener\ndef _(environment, **kw):\n total_latency = environment.stats.entries[("total_latency", "METRIC")]\n if environment.stats.total.num_failures > 0 or total_latency.num_requests == 0:\n print("Test failed due to failed requests")\n environment.process_exit_code = 1\n return\n\n entries = copy.copy(InitTracker.logging_params)\n if environment.parsed_options.qps is not None:\n entries["concurrency"] = (\n f"QPS {environment.parsed_options.qps} {environment.parsed_options.qps_distribution}"\n )\n else:\n entries["concurrency"] = InitTracker.users\n for metric_name in [\n "time_to_first_token",\n "latency_per_token",\n "num_tokens",\n "total_latency",\n "prompt_tokens", # might overwrite the static value based on server side tokenization\n ]:\n entries[metric_name] = environment.stats.entries[\n (metric_name, "METRIC")\n ].avg_response_time\n if not environment.parsed_options.stream:\n # if there\'s no streaming these metrics are meaningless\n entries["time_to_first_token"] = ""\n entries["latency_per_token"] = ""\n entries["num_requests"] = total_latency.num_requests\n entries["qps"] = total_latency.total_rps\n\n pretty_name = lambda s: " ".join([w.capitalize() for w in s.split("_")])\n entries = {pretty_name(k): v for k, v in entries.items()}\n\n # print in the final event handler to make sure our output is the last one\n @events.quit.add_listener\n def exit_printer(**kw):\n max_width = max(len(k) for k in entries.keys())\n print(" Summary ".center(80, "="))\n for k, v in entries.items():\n print(f"{k:<{max_width}}: {v}")\n print("=" * 80)\n\n if environment.parsed_options.summary_file:\n with open(environment.parsed_options.summary_file, "a") as f:\n writer = csv.DictWriter(f, fieldnames=entries.keys())\n if f.tell() == 0:\n writer.writeheader()\n writer.writerow(entries)\n', - line1: 1, - line2: 1225, - }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "Let me also check the visualization script to understand how the results are visualized:", - ftm_tool_calls: [ - { - id: "toolu_01VxiuWGqPfbMSgprqkhe7uU", - function: { - arguments: - '{"paths": "/home/fupfv/git/benchmark1_0701/src/visualize_results.py", "skeleton": false}', - name: "cat", - }, - type: "function", - index: 0, - }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01VxiuWGqPfbMSgprqkhe7uU", - ftm_content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/src/visualize_results.py\n", - }, - { - ftm_role: "context_file", - ftm_content: JSON.stringify([ - { - file_name: "src/visualize_results.py", - file_content: - "#!/usr/bin/env python3\n\"\"\"\nVisualization tool for LLM load test results.\nThis script generates various charts and visualizations from the benchmark results.\n\"\"\"\n\nimport os\nimport json\nimport 
glob\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nfrom datetime import datetime\n\nclass ResultsVisualizer:\n def __init__(self, results_dir, output_dir=None):\n \"\"\"\n Initialize the visualizer with the directory containing test results.\n \n Args:\n results_dir: Directory containing test result files\n output_dir: Directory to save visualizations (defaults to results_dir/visualizations)\n \"\"\"\n self.results_dir = Path(results_dir)\n if output_dir:\n self.output_dir = Path(output_dir)\n else:\n self.output_dir = self.results_dir / \"visualizations\"\n \n # Create output directory if it doesn't exist\n os.makedirs(self.output_dir, exist_ok=True)\n \n # Set style for plots\n sns.set_style(\"whitegrid\")\n plt.rcParams.update({\n 'figure.figsize': (12, 8),\n 'font.size': 12,\n 'axes.titlesize': 16,\n 'axes.labelsize': 14\n })\n \n # Load data\n self.data = self._load_data()\n \n def _load_data(self):\n \"\"\"Load and combine all CSV result files into a single DataFrame.\"\"\"\n all_files = glob.glob(str(self.results_dir / \"**\" / \"*.csv\"), recursive=True)\n \n # Filter out files that don't match the expected pattern\n result_files = [f for f in all_files if \"results_test\" in f or \"load_test_report\" in f]\n \n if not result_files:\n raise ValueError(f\"No result files found in {self.results_dir}\")\n \n print(f\"Found {len(result_files)} result files\")\n \n # Load all files into a list of dataframes\n dfs = []\n for file in result_files:\n try:\n df = pd.read_csv(file)\n # Add source file information\n df['source_file'] = os.path.basename(file)\n df['run_dir'] = os.path.basename(os.path.dirname(file))\n dfs.append(df)\n except Exception as e:\n print(f\"Error loading {file}: {e}\")\n \n if not dfs:\n raise ValueError(\"No valid data files could be loaded\")\n \n # Combine all dataframes\n combined_df = pd.concat(dfs, ignore_index=True)\n \n # Convert numeric columns\n numeric_cols = ['Time To First Token', 'Latency Per Token', 'Total Latency', \n 'Num Tokens', 'Num Requests', 'Qps', 'Prompt Tokens', \n 'Generation Tokens', 'Concurrency']\n \n for col in numeric_cols:\n if col in combined_df.columns:\n combined_df[col] = pd.to_numeric(combined_df[col], errors='coerce')\n \n # Extract user count and output token count from test_name\n if 'test_name' in combined_df.columns:\n combined_df['users'] = combined_df['test_name'].str.extract(r'test_u(\\d+)_o\\d+').astype(float)\n combined_df['output_tokens'] = combined_df['test_name'].str.extract(r'test_u\\d+_o(\\d+)').astype(float)\n \n return combined_df\n \n def plot_latency_by_concurrency(self):\n \"\"\"Plot latency metrics by concurrency level.\"\"\"\n if 'Concurrency' not in self.data.columns or 'Total Latency' not in self.data.columns:\n print(\"Required columns not found for latency by concurrency plot\")\n return\n \n plt.figure(figsize=(14, 8))\n \n # Group by concurrency and calculate mean latency\n grouped = self.data.groupby('Concurrency')[['Total Latency', 'Time To First Token', 'Latency Per Token']].mean().reset_index()\n \n # Plot\n plt.plot(grouped['Concurrency'], grouped['Total Latency'], 'o-', linewidth=2, label='Total Latency')\n plt.plot(grouped['Concurrency'], grouped['Time To First Token'], 's-', linewidth=2, label='Time To First Token')\n \n # Add second y-axis for latency per token\n ax2 = plt.gca().twinx()\n ax2.plot(grouped['Concurrency'], grouped['Latency Per Token'], '^-', color='green', linewidth=2, label='Latency 
Per Token')\n ax2.set_ylabel('Latency Per Token (ms)', color='green')\n ax2.tick_params(axis='y', colors='green')\n \n plt.title('Latency Metrics by Concurrency Level')\n plt.xlabel('Concurrent Users')\n plt.ylabel('Latency (ms)')\n plt.grid(True)\n \n # Combine legends from both axes\n lines1, labels1 = plt.gca().get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n plt.legend(lines1 + lines2, labels1 + labels2, loc='upper left')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'latency_by_concurrency.png')\n plt.close()\n \n def plot_throughput_by_concurrency(self):\n \"\"\"Plot throughput (QPS) by concurrency level.\"\"\"\n if 'Concurrency' not in self.data.columns or 'Qps' not in self.data.columns:\n print(\"Required columns not found for throughput plot\")\n return\n \n plt.figure(figsize=(12, 8))\n \n # Group by concurrency and calculate mean QPS\n grouped = self.data.groupby('Concurrency')['Qps'].mean().reset_index()\n \n # Plot\n sns.barplot(x='Concurrency', y='Qps', data=grouped)\n \n plt.title('Throughput (QPS) by Concurrency Level')\n plt.xlabel('Concurrent Users')\n plt.ylabel('Queries Per Second')\n plt.grid(True, axis='y')\n \n # Add value labels on top of bars\n for i, v in enumerate(grouped['Qps']):\n plt.text(i, v + 0.1, f\"{v:.2f}\", ha='center')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'throughput_by_concurrency.png')\n plt.close()\n \n def plot_latency_by_output_tokens(self):\n \"\"\"Plot latency metrics by output token count.\"\"\"\n if 'output_tokens' not in self.data.columns or 'Total Latency' not in self.data.columns:\n print(\"Required columns not found for latency by output tokens plot\")\n return\n \n plt.figure(figsize=(14, 8))\n \n # Group by output tokens and calculate mean latency\n grouped = self.data.groupby('output_tokens')[['Total Latency', 'Time To First Token', 'Latency Per Token']].mean().reset_index()\n \n # Plot\n plt.plot(grouped['output_tokens'], grouped['Total Latency'], 'o-', linewidth=2, label='Total Latency')\n plt.plot(grouped['output_tokens'], grouped['Time To First Token'], 's-', linewidth=2, label='Time To First Token')\n \n # Add second y-axis for latency per token\n ax2 = plt.gca().twinx()\n ax2.plot(grouped['output_tokens'], grouped['Latency Per Token'], '^-', color='green', linewidth=2, label='Latency Per Token')\n ax2.set_ylabel('Latency Per Token (ms)', color='green')\n ax2.tick_params(axis='y', colors='green')\n \n plt.title('Latency Metrics by Output Token Count')\n plt.xlabel('Output Tokens')\n plt.ylabel('Latency (ms)')\n plt.grid(True)\n \n # Combine legends from both axes\n lines1, labels1 = plt.gca().get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n plt.legend(lines1 + lines2, labels1 + labels2, loc='upper left')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'latency_by_output_tokens.png')\n plt.close()\n \n def plot_heatmap_latency(self):\n \"\"\"Create a heatmap of latency by concurrency and output tokens.\"\"\"\n if 'users' not in self.data.columns or 'output_tokens' not in self.data.columns:\n print(\"Required columns not found for heatmap plot\")\n return\n \n # Group by users and output_tokens and calculate mean latency\n pivot = self.data.pivot_table(\n index='users', \n columns='output_tokens', \n values='Total Latency',\n aggfunc='mean'\n )\n \n plt.figure(figsize=(12, 10))\n sns.heatmap(pivot, annot=True, fmt=\".1f\", cmap=\"YlGnBu\", linewidths=.5)\n \n plt.title('Total Latency (ms) by Concurrency and Output Tokens')\n 
plt.xlabel('Output Tokens')\n plt.ylabel('Concurrent Users')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'heatmap_latency.png')\n plt.close()\n \n def plot_model_comparison(self):\n \"\"\"Compare performance across different models if available.\"\"\"\n if 'Model' not in self.data.columns:\n print(\"Model column not found for model comparison plot\")\n return\n \n # Check if we have multiple models\n models = self.data['Model'].unique()\n if len(models) <= 1:\n print(\"Only one model found, skipping model comparison\")\n return\n \n plt.figure(figsize=(14, 10))\n \n # Create subplot for latency comparison\n plt.subplot(2, 1, 1)\n sns.boxplot(x='Model', y='Total Latency', data=self.data)\n plt.title('Latency Comparison Across Models')\n plt.ylabel('Total Latency (ms)')\n plt.xticks(rotation=45)\n \n # Create subplot for throughput comparison\n plt.subplot(2, 1, 2)\n sns.boxplot(x='Model', y='Qps', data=self.data)\n plt.title('Throughput Comparison Across Models')\n plt.ylabel('Queries Per Second')\n plt.xticks(rotation=45)\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'model_comparison.png')\n plt.close()\n \n def plot_run_comparison(self):\n \"\"\"Compare performance across different test runs.\"\"\"\n if 'run_dir' not in self.data.columns:\n print(\"Run directory column not found for run comparison plot\")\n return\n \n # Check if we have multiple runs\n runs = self.data['run_dir'].unique()\n if len(runs) <= 1:\n print(\"Only one run found, skipping run comparison\")\n return\n \n plt.figure(figsize=(16, 12))\n \n # Create subplot for latency comparison\n plt.subplot(2, 1, 1)\n sns.boxplot(x='run_dir', y='Total Latency', data=self.data)\n plt.title('Latency Comparison Across Test Runs')\n plt.ylabel('Total Latency (ms)')\n plt.xticks(rotation=90)\n \n # Create subplot for throughput comparison\n plt.subplot(2, 1, 2)\n sns.boxplot(x='run_dir', y='Qps', data=self.data)\n plt.title('Throughput Comparison Across Test Runs')\n plt.ylabel('Queries Per Second')\n plt.xticks(rotation=90)\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'run_comparison.png')\n plt.close()\n \n def plot_latency_distribution(self):\n \"\"\"Plot the distribution of latency values.\"\"\"\n if 'Total Latency' not in self.data.columns:\n print(\"Total Latency column not found for latency distribution plot\")\n return\n \n plt.figure(figsize=(12, 8))\n \n # Create histogram with KDE\n sns.histplot(self.data['Total Latency'].dropna(), kde=True, bins=30)\n \n plt.title('Distribution of Total Latency')\n plt.xlabel('Total Latency (ms)')\n plt.ylabel('Frequency')\n \n # Add vertical line for mean and median\n mean_latency = self.data['Total Latency'].mean()\n median_latency = self.data['Total Latency'].median()\n \n plt.axvline(mean_latency, color='r', linestyle='--', label=f'Mean: {mean_latency:.2f} ms')\n plt.axvline(median_latency, color='g', linestyle='-.', label=f'Median: {median_latency:.2f} ms')\n \n plt.legend()\n plt.tight_layout()\n plt.savefig(self.output_dir / 'latency_distribution.png')\n plt.close()\n \n def plot_token_generation_speed(self):\n \"\"\"Plot token generation speed (tokens per second) by concurrency.\"\"\"\n if 'Num Tokens' not in self.data.columns or 'Total Latency' not in self.data.columns:\n print(\"Required columns not found for token generation speed plot\")\n return\n \n # Calculate tokens per second\n self.data['tokens_per_second'] = self.data['Num Tokens'] / (self.data['Total Latency'] / 1000)\n \n plt.figure(figsize=(12, 8))\n \n # Group by concurrency and 
calculate mean tokens per second\n if 'Concurrency' in self.data.columns:\n grouped = self.data.groupby('Concurrency')['tokens_per_second'].mean().reset_index()\n \n # Plot\n sns.barplot(x='Concurrency', y='tokens_per_second', data=grouped)\n \n plt.title('Token Generation Speed by Concurrency Level')\n plt.xlabel('Concurrent Users')\n plt.ylabel('Tokens Per Second')\n \n # Add value labels on top of bars\n for i, v in enumerate(grouped['tokens_per_second']):\n plt.text(i, v + 0.1, f\"{v:.2f}\", ha='center')\n else:\n # If no concurrency data, just plot overall distribution\n sns.histplot(self.data['tokens_per_second'].dropna(), kde=True, bins=30)\n plt.title('Distribution of Token Generation Speed')\n plt.xlabel('Tokens Per Second')\n plt.ylabel('Frequency')\n \n plt.grid(True, axis='y')\n plt.tight_layout()\n plt.savefig(self.output_dir / 'token_generation_speed.png')\n plt.close()\n \n def generate_summary_report(self):\n \"\"\"Generate a text summary report with key statistics.\"\"\"\n if self.data.empty:\n print(\"No data available for summary report\")\n return\n \n # Calculate summary statistics\n summary = {\n 'total_tests': len(self.data),\n 'unique_models': self.data['Model'].nunique() if 'Model' in self.data.columns else 0,\n 'unique_runs': self.data['run_dir'].nunique() if 'run_dir' in self.data.columns else 0,\n 'avg_latency': self.data['Total Latency'].mean() if 'Total Latency' in self.data.columns else None,\n 'median_latency': self.data['Total Latency'].median() if 'Total Latency' in self.data.columns else None,\n 'avg_qps': self.data['Qps'].mean() if 'Qps' in self.data.columns else None,\n 'max_concurrency': self.data['Concurrency'].max() if 'Concurrency' in self.data.columns else None,\n 'timestamp': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n \n # Create summary report\n report = f\"\"\"\n # LLM Load Test Summary Report\n \n Generated: {summary['timestamp']}\n \n ## Overview\n - Total tests analyzed: {summary['total_tests']}\n - Unique models tested: {summary['unique_models']}\n - Unique test runs: {summary['unique_runs']}\n \n ## Performance Metrics\n - Average latency: {summary['avg_latency']:.2f} ms\n - Median latency: {summary['median_latency']:.2f} ms\n - Average throughput: {summary['avg_qps']:.2f} QPS\n - Maximum concurrency tested: {summary['max_concurrency']}\n \n ## Visualizations\n The following visualization files have been generated in {self.output_dir}:\n - latency_by_concurrency.png\n - throughput_by_concurrency.png\n - latency_by_output_tokens.png\n - heatmap_latency.png\n - latency_distribution.png\n - token_generation_speed.png\n \"\"\"\n \n if summary['unique_models'] > 1:\n report += \"- model_comparison.png\\n\"\n \n if summary['unique_runs'] > 1:\n report += \"- run_comparison.png\\n\"\n \n # Write report to file\n with open(self.output_dir / 'summary_report.md', 'w') as f:\n f.write(report)\n \n print(f\"Summary report generated at {self.output_dir / 'summary_report.md'}\")\n \n def generate_all_visualizations(self):\n \"\"\"Generate all available visualizations.\"\"\"\n print(\"Generating visualizations...\")\n \n self.plot_latency_by_concurrency()\n self.plot_throughput_by_concurrency()\n self.plot_latency_by_output_tokens()\n self.plot_heatmap_latency()\n self.plot_model_comparison()\n self.plot_run_comparison()\n self.plot_latency_distribution()\n self.plot_token_generation_speed()\n self.generate_summary_report()\n \n print(f\"All visualizations generated in {self.output_dir}\")\n\n\ndef main():\n parser = 
argparse.ArgumentParser(description='Generate visualizations for LLM load test results')\n parser.add_argument('--results-dir', type=str, default='results',\n help='Directory containing test results (default: results)')\n parser.add_argument('--output-dir', type=str, default=None,\n help='Directory to save visualizations (default: results_dir/visualizations)')\n parser.add_argument('--run', type=str, default=None,\n help='Specific run directory to analyze (default: analyze all runs)')\n \n args = parser.parse_args()\n \n # Determine the results directory\n base_dir = Path(args.results_dir)\n \n if args.run:\n # If a specific run is specified, use that directory\n results_dir = base_dir / args.run\n if not results_dir.exists():\n print(f\"Run directory {results_dir} not found\")\n return\n else:\n # Otherwise use the base results directory\n results_dir = base_dir\n \n try:\n visualizer = ResultsVisualizer(results_dir, args.output_dir)\n visualizer.generate_all_visualizations()\n except Exception as e:\n print(f\"Error generating visualizations: {e}\")\n\n\nif __name__ == \"__main__\":\n main()\n", - line1: 1, - line2: 449, +export const MARKDOWN_ISSUE: BaseMessage[] = [ + { + ftm_role: "system", + ftm_content: + "[mode3] You are Refact Agent, an autonomous bot for coding tasks.\n\nCore Principles\n1. Use knowledge()\n - Always use knowledge() first when you encounter an agentic (complex) task.\n - This tool can access external data, including successful \"trajectories\" (examples of past solutions).\n - External database records begin with the icon \"🗃️\" followed by a record identifier.\n - Use these records to help solve your tasks by analogy.\n2. Use locate() with the Full Problem Statement\n - Provide the entire user request in the problem_statement argument to avoid losing any details (\"telephone game\" effect).\n - Include user's emotional stance, code snippets, formatting, instructions—everything word-for-word.\n - Only omit parts of the user's request if they are unrelated to the final solution.\n - Avoid using locate() if the problem is quite simple and can be solved without extensive project analysis.\n3. Execute Changes and Validate\n - When a solution requires file modifications, use the appropriate *_textdoc() tools.\n - After making changes, perform a validation step by reviewing modified files using cat() or similar tools.\n - Check for available build tools (like cmdline_cargo_check, cmdline_cargo_build, etc.) and use them to validate changes.\n - Ensure all changes are complete and consistent with the project's standards.\n - If build validation fails or other issues are found, collect additional context and revise the changes.\n\nAnswering Strategy\n1. If the user’s question is unrelated to the project\n - Answer directly without using any special calls.\n2. If the user’s question is related to the project\n - First, call knowledge() for relevant information and best practices.\n3. Making Changes\n - If a solution requires file changes, use `*_textdoc()` tools.\n - It's a good practice to call cat() to track changes for changed files.\n\nImportant Notes\n1. Parallel Exploration\n - When you explore different ideas, use multiple parallel methods.\n2. Project-Related Questions\n - For any project question, always call knowledge() before taking any action.\n3. 
Knowledge Building (Automatic)\n - After completing any significant task, AUTOMATICALLY use create_knowledge() without waiting for user prompting:\n * Important code patterns and their usage locations\n * Key relationships between classes/functions\n * File dependencies and project structure insights\n * Successful solution patterns for future reference\n - Proactively create knowledge entries whenever you:\n * Solve a problem or implement a feature\n * Discover patterns in the codebase\n * Learn something about project structure or dependencies\n * Fix a bug or identify potential issues\n * Analyze placeholders, test data, or configuration files\n - Consider each interaction an opportunity to build the knowledge base - don't wait for explicit instructions\n4. Continuous Learning\n - Treat every interaction as a learning opportunity\n - When you encounter interesting code patterns, project structures, or implementation details, document them\n - If you analyze placeholders, test data, or configuration files, record your findings\n - Don't wait for the user to ask you to remember - proactively build the knowledge base\n\nWhen running on user's laptop, you most likely have the shell() tool. It's for one-time dependency installations, or doing whatever\nuser is asking you to do. Tools the user can set up are better, because they don't require confimations when running on a laptop.\nWhen doing something for the project using shell() tool, offer the user to make a cmdline_* tool after you have successfully run\nthe shell() call. But double-check that it doesn't already exist, and it is actually typical for this kind of project. You can offer\nthis by writing:\n\n🧩SETTINGS:cmdline_cargo_check\n\nfrom a new line, that will open (when clicked) a wizard that creates `cargo check` (in this example) command line tool.\n\nIn a similar way, service_* tools work. The difference is cmdline_* is designed for non-interactive blocking commands that immediately\nreturn text in stdout/stderr, and service_* is designed for blocking background commands, such as hypercorn server that runs forever until you hit Ctrl+C.\nHere is another example:\n\n🧩SETTINGS:service_hypercorn\n\n\nYou might receive additional instructions that start with 💿. Those are not coming from the user, they are programmed to help you operate\nwell and they are always in English. Answer in the language the user has asked the question.\n\n\n- below general information about the current project -\n\nThe current IDE workspace has these project directories:\n/home/fupfv/git/benchmark1_0701\n\nThe active IDE file is:\n/home/fupfv/.config/Code/User/settings.json\nThere's no version control detected, complain to user if they want to use anything git/hg/svn/etc.\n\n\n\nIMPORTANT: Knowledge creation is not optional. After EVERY significant task, AUTOMATICALLY create a knowledge entry using create_knowledge() without waiting for user prompting. This is a core part of your workflow.\n", + }, + { + ftm_role: "user", + ftm_content: "Can you create comprehensive documentation?\n", + checkpoints: [ + { + workspace_folder: "/home/fupfv/git/benchmark1_0701", + commit_hash: "7e71dfea56b596c563725dd9772d1440185ef102", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "I'd be happy to help create comprehensive documentation for your project. 
To provide the most useful documentation, I need to understand what project you're working with and what kind of documentation you need.\n\nLet me first explore the project structure to get a better understanding of what we're working with.", + ftm_tool_calls: [ + { + id: "toolu_01JbWarAwzjMyV6azDkd5skX", + function: { + arguments: '{"use_ast": true}', + name: "tree", }, - ]), - }, - { - ftm_role: "assistant", - ftm_content: - "Based on my analysis of the project, I'll create comprehensive documentation for the LLM Load Testing Tool. Let me create a documentation structure that covers all aspects of the project.", - ftm_tool_calls: [ - { - id: "toolu_011nt32gEpSJZet4jGzrVXBt", - function: { - arguments: - '{"path": "/home/fupfv/git/benchmark1_0701/docs/README.md", "content": "# LLM Load Testing Tool Documentation\\n\\nWelcome to the comprehensive documentation for the LLM Load Testing Tool. This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\\n\\n## Table of Contents\\n\\n1. [Introduction](#introduction)\\n2. [Getting Started](#getting-started)\\n - [Installation](#installation)\\n - [Quick Start](#quick-start)\\n3. [Core Components](#core-components)\\n4. [Running Tests](#running-tests)\\n - [Basic Usage](#basic-usage)\\n - [Configuration Options](#configuration-options)\\n - [Test Parameters](#test-parameters)\\n5. [Understanding Results](#understanding-results)\\n - [Report Files](#report-files)\\n - [Metrics Explained](#metrics-explained)\\n6. [Visualization Tools](#visualization-tools)\\n - [Single Run Visualization](#single-run-visualization)\\n - [Run Comparison](#run-comparison)\\n - [Grafana Dashboard](#grafana-dashboard)\\n7. [Advanced Usage](#advanced-usage)\\n - [Custom Prompts](#custom-prompts)\\n - [Provider-Specific Options](#provider-specific-options)\\n - [Scaling Tests](#scaling-tests)\\n8. [Troubleshooting](#troubleshooting)\\n9. [API Reference](#api-reference)\\n10. [Contributing](#contributing)\\n\\n## Introduction\\n\\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\\n\\nKey features include:\\n- Testing with different concurrency levels and output token counts\\n- Comprehensive metrics collection (latency, throughput, token generation speed)\\n- Detailed visualization of test results\\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\\n- Comparison of different test runs and models\\n\\n## Getting Started\\n\\n### Installation\\n\\n1. Clone the repository:\\n```bash\\ngit clone \\ncd llm-load-test\\n```\\n\\n2. Install dependencies:\\n```bash\\npip install -r requirements.txt\\n```\\n\\n3. Make scripts executable (optional):\\n```bash\\nbash make_scripts_executable.sh\\n```\\n\\n### Quick Start\\n\\nRun a basic load test with default parameters:\\n\\n```bash\\npython src/llm_load_test_runner.py\\n```\\n\\nThis will run tests with the default configuration:\\n- Users: 1, 2, 50\\n- Output tokens: 15, 30\\n- Run time: 60s per test\\n- Prompt tokens: 4046\\n- Temperature: 1.0\\n\\n## Core Components\\n\\nThe tool consists of several key components:\\n\\n1. **llm_load_test_runner.py**: The main script that orchestrates the load tests.\\n2. **load_test.py**: The Locust implementation that handles the actual request generation.\\n3. 
**llm_test_logger.py**: Handles logging of test results.\\n4. **visualize_results.py**: Generates visualizations from test results.\\n5. **compare_runs.py**: Compares results from different test runs.\\n6. **dashboard_generator.py**: Creates Grafana dashboard configurations.\\n\\n## Running Tests\\n\\n### Basic Usage\\n\\nThe simplest way to run a test is:\\n\\n```bash\\npython src/llm_load_test_runner.py\\n```\\n\\n### Configuration Options\\n\\nYou can customize the test by modifying the `base_config` dictionary in `llm_load_test_runner.py`:\\n\\n```python\\nbase_config = {\\n \\"host\\": \\"https://your-llm-server.com/\\",\\n \\"provider\\": \\"openai\\",\\n \\"model\\": \\"your-model-name\\",\\n \\"api-key\\": \\"your-api-key\\",\\n \\"logprobs\\": 5,\\n \\"run-time\\": \\"60s\\",\\n \\"temperature\\": 1.0,\\n}\\n```\\n\\n### Test Parameters\\n\\nThe main test parameters you can adjust include:\\n\\n- **concurrent_users**: List of user counts to test (e.g., `[1, 2, 5, 10, 20, 50]`)\\n- **max_tokens**: List of output token counts to test (e.g., `[1, 15, 30, 50]`)\\n- **run-time**: Duration of each test (e.g., `\\"60s\\"`)\\n- **prompt-tokens**: Number of tokens in the prompt (default: 4046)\\n- **temperature**: Temperature parameter for generation (default: 1.0)\\n\\nExample of customizing test parameters:\\n\\n```python\\nconcurrent_users = [1, 5, 10]\\nmax_tokens = [10, 50, 100]\\nbase_config[\\"run-time\\"] = \\"120s\\"\\n```\\n\\n## Understanding Results\\n\\n### Report Files\\n\\nAfter running tests, results are saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\\n\\n1. **Individual test results**: `results_test_u{users}_o{tokens}.csv`\\n2. **Summary reports**:\\n - `load_test_report_YYYYMMDD_HHMMSS.csv`\\n - `load_test_report_YYYYMMDD_HHMMSS.json`\\n\\n### Metrics Explained\\n\\nThe test collects and reports the following metrics:\\n\\n- **Total Latency**: Total time from request to complete response (ms)\\n- **Time To First Token**: Time until the first token is received (ms)\\n- **Latency Per Token**: Average time to generate each token (ms)\\n- **Num Tokens**: Number of tokens generated\\n- **Num Requests**: Total number of requests processed\\n- **Qps**: Queries per second (throughput)\\n- **Prompt Tokens**: Number of tokens in the prompt\\n- **Generation Tokens**: Number of tokens requested to generate\\n\\n## Visualization Tools\\n\\nThe project includes several visualization tools to help analyze test results.\\n\\n### Single Run Visualization\\n\\nGenerate visualizations for a single test run:\\n\\n```bash\\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\\n```\\n\\nThis generates the following visualizations:\\n- Latency by concurrency level\\n- Throughput (QPS) by concurrency level\\n- Latency by output token count\\n- Heatmap of latency by concurrency and output tokens\\n- Latency distribution\\n- Token generation speed\\n- Summary report\\n\\n### Run Comparison\\n\\nCompare results across multiple test runs:\\n\\n```bash\\npython src/compare_runs.py --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\\n```\\n\\nThis generates comparison visualizations showing:\\n- Latency metrics comparison across runs\\n- Throughput comparison across runs\\n- Performance by concurrency level across runs\\n- Performance by output token count across runs\\n\\n### Grafana Dashboard\\n\\nCreate a Grafana dashboard configuration for real-time monitoring:\\n\\n```bash\\npython src/dashboard_generator.py --output-file grafana-dashboard.json\\n```\\n\\nTo use the 
dashboard:\\n1. Open Grafana in your web browser\\n2. Navigate to Dashboards > Import\\n3. Upload the generated JSON file or paste its contents\\n4. Configure the data source if prompted\\n5. Click Import\\n\\n## Advanced Usage\\n\\n### Custom Prompts\\n\\nYou can customize the prompt used for testing by modifying the `prompt` variable in `load_test.py`:\\n\\n```python\\nprompt = \\"\\"\\"Your custom prompt text here\\"\\"\\"\\n```\\n\\nFor more complex scenarios, you can use a file with multiple prompts:\\n\\n```bash\\npython src/llm_load_test_runner.py --prompt-text @prompts.jsonl\\n```\\n\\n### Provider-Specific Options\\n\\nThe tool supports various LLM providers, each with specific configuration options:\\n\\n- **OpenAI/Compatible APIs**: Uses the standard OpenAI API format\\n- **vLLM**: Optimized for vLLM servers with specific parameters\\n- **Fireworks**: Includes Fireworks-specific optimizations\\n- **Triton**: Support for Triton Inference Server\\n\\nProvider-specific options can be set in the `format_payload` method of each provider class in `load_test.py`.\\n\\n### Scaling Tests\\n\\nFor large-scale testing, consider:\\n\\n1. **Increasing run time**: Set longer test durations for more stable results\\n ```python\\n base_config[\\"run-time\\"] = \\"300s\\" # 5 minutes per test\\n ```\\n\\n2. **Testing with more concurrent users**: Add higher concurrency levels\\n ```python\\n concurrent_users = [1, 10, 50, 100, 200]\\n ```\\n\\n3. **Using QPS mode**: Instead of fixed users, target a specific query rate\\n ```bash\\n python src/load_test.py --qps 10 --users 100\\n ```\\n\\n## Troubleshooting\\n\\nCommon issues and solutions:\\n\\n1. **Connection errors**: Check your API key and server URL\\n2. **Timeout errors**: Increase timeout settings or reduce concurrency\\n3. **Inconsistent results**: Increase test duration for more stable metrics\\n4. **Missing data in visualizations**: Ensure CSV files contain all expected columns\\n5. **Memory errors**: For large datasets, consider filtering data or using a machine with more memory\\n\\n## API Reference\\n\\n### LLMLoadTest Class\\n\\nThe main class that orchestrates the load tests.\\n\\n**Methods**:\\n- `__init__(base_config, results_dir)`: Initialize with configuration and results directory\\n- `run_test(test_name, users, output_tokens)`: Run a single test with specified parameters\\n- `write_test_report(...)`: Write test results to CSV and JSON files\\n- `generate_report()`: Generate a summary report of all tests\\n\\n### ResultsVisualizer Class\\n\\nGenerates visualizations from test results.\\n\\n**Methods**:\\n- `__init__(results_dir, output_dir)`: Initialize with results and output directories\\n- `generate_all_visualizations()`: Generate all available visualizations\\n- Various plotting methods for specific visualizations\\n\\n## Contributing\\n\\nContributions to the LLM Load Testing Tool are welcome! Here\'s how you can contribute:\\n\\n1. **Bug reports**: Open an issue describing the bug and steps to reproduce\\n2. **Feature requests**: Open an issue describing the desired feature\\n3. **Code contributions**: Fork the repository, make changes, and submit a pull request\\n4. 
**Documentation improvements**: Submit pull requests with documentation updates\\n\\nPlease follow the existing code style and include tests for new features."}', - name: "create_textdoc", - }, - type: "function", - index: 0, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01JbWarAwzjMyV6azDkd5skX", + ftm_content: + "/\n home/\n fupfv/\n git/\n benchmark1_0701/\n 12.zip\n LICENSE\n README.md\n VISUALIZATION.md\n example_new_file.py\n grafana-dashboard.json\n llm_load_test.zip\n llm_load_test/\n README.md\n requirements.txt\n src/\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.py\n load_test_report_20240811_002319.csv\n load_test_report_20240811_002319.json\n make_scripts_executable.sh\n requirements.txt\n results/\n run_20250129_152629/\n load_test_report_2025-01-29T152630.827620.csv\n load_test_report_2025-01-29T152630.827620.json\n load_test_report_2025-01-29T152636.621391.csv\n load_test_report_2025-01-29T152636.621391.json\n load_test_report_2025-01-29T152642.333384.csv\n load_test_report_2025-01-29T152642.333384.json\n load_test_report_2025-01-29T152648.032846.csv\n load_test_report_2025-01-29T152648.032846.json\n load_test_report_2025-01-29T152653.733025.csv\n load_test_report_2025-01-29T152653.733025.json\n load_test_report_2025-01-29T152659.442419.csv\n load_test_report_2025-01-29T152659.442419.json\n load_test_report_20250129_152704.csv\n load_test_report_20250129_152704.json\n run_20250129_152807/\n load_test_report_2025-01-29T152808.476840.csv\n load_test_report_2025-01-29T152808.476840.json\n load_test_report_2025-01-29T152814.290370.csv\n load_test_report_2025-01-29T152814.290370.json\n load_test_report_2025-01-29T152819.988992.csv\n load_test_report_2025-01-29T152819.988992.json\n load_test_report_2025-01-29T152825.712261.csv\n load_test_report_2025-01-29T152825.712261.json\n load_test_report_2025-01-29T152831.461047.csv\n load_test_report_2025-01-29T152831.461047.json\n load_test_report_2025-01-29T152837.233726.csv\n load_test_report_2025-01-29T152837.233726.json\n load_test_report_20250129_152842.csv\n load_test_report_20250129_152842.json\n run_20250129_152930/\n load_test_report_2025-01-29T153031.809694.csv\n load_test_report_2025-01-29T153031.809694.json\n load_test_report_2025-01-29T153137.610641.csv\n load_test_report_2025-01-29T153137.610641.json\n load_test_report_2025-01-29T153243.818603.csv\n load_test_report_2025-01-29T153243.818603.json\n load_test_report_2025-01-29T153349.887918.csv\n load_test_report_2025-01-29T153349.887918.json\n load_test_report_2025-01-29T153504.701174.csv\n load_test_report_2025-01-29T153504.701174.json\n load_test_report_2025-01-29T153615.800362.csv\n load_test_report_2025-01-29T153615.800362.json\n load_test_report_20250129_153620.csv\n load_test_report_20250129_153620.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n src/\n __pycache__/\n llm_test_logger.cpython-310.pyc\n load_test.cpython-310.pyc\n compare_runs.py\n dashboard_generator.py\n from transformers import AutoTokenizer.py\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.log\n load_test.py\n load_test_aggregator.py\n load_test_tgi.py\n load_test_vllm.py\n qwen_run_20250128_193328.zip\n qwen_run_20250129_131310.zip\n results/\n run_20250129_131310/\n load_test_report_2025-01-29T131340.582736.csv\n load_test_report_2025-01-29T131340.582736.json\n 
load_test_report_2025-01-29T131416.770529.csv\n load_test_report_2025-01-29T131416.770529.json\n load_test_report_2025-01-29T131452.904227.csv\n load_test_report_2025-01-29T131452.904227.json\n load_test_report_2025-01-29T131529.208363.csv\n load_test_report_2025-01-29T131529.208363.json\n load_test_report_2025-01-29T131612.332502.csv\n load_test_report_2025-01-29T131612.332502.json\n load_test_report_2025-01-29T131654.024454.csv\n load_test_report_2025-01-29T131654.024454.json\n load_test_report_20250129_131659.csv\n load_test_report_20250129_131659.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_131828/\n load_test_report_2025-01-29T131859.729718.csv\n load_test_report_2025-01-29T131859.729718.json\n load_test_report_2025-01-29T131935.556939.csv\n load_test_report_2025-01-29T131935.556939.json\n load_test_report_2025-01-29T132011.817203.csv\n load_test_report_2025-01-29T132011.817203.json\n load_test_report_2025-01-29T132047.948690.csv\n load_test_report_2025-01-29T132047.948690.json\n load_test_report_2025-01-29T132140.620425.csv\n load_test_report_2025-01-29T132140.620425.json\n load_test_report_2025-01-29T132237.254055.csv\n load_test_report_2025-01-29T132237.254055.json\n load_test_report_20250129_132242.csv\n load_test_report_20250129_132242.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_132842/\n load_test_report_2025-01-29T132913.096074.csv\n load_test_report_2025-01-29T132913.096074.json\n load_test_report_2025-01-29T132949.286127.csv\n load_test_report_2025-01-29T132949.286127.json\n load_test_report_2025-01-29T133025.273897.csv\n load_test_report_2025-01-29T133025.273897.json\n load_test_report_2025-01-29T133102.000762.csv\n load_test_report_2025-01-29T133102.000762.json\n load_test_report_2025-01-29T133154.340248.csv\n load_test_report_2025-01-29T133154.340248.json\n load_test_report_2025-01-29T133257.783732.csv\n load_test_report_2025-01-29T133257.783732.json\n load_test_report_20250129_133302.csv\n load_test_report_20250129_133302.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_133711/\n load_test_report_2025-01-29T133742.239356.csv\n load_test_report_2025-01-29T133742.239356.json\n load_test_report_2025-01-29T133818.175709.csv\n load_test_report_2025-01-29T133818.175709.json\n load_test_report_2025-01-29T133853.789246.csv\n load_test_report_2025-01-29T133853.789246.json\n load_test_report_2025-01-29T133929.633962.csv\n load_test_report_2025-01-29T133929.633962.json\n load_test_report_2025-01-29T134013.341083.csv\n load_test_report_2025-01-29T134013.341083.json\n load_test_report_2025-01-29T134101.336503.csv\n load_test_report_2025-01-29T134101.336503.json\n load_test_report_20250129_134106.csv\n load_test_report_20250129_134106.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_134818/\n load_test_report_2025-01-29T134919.598778.csv\n load_test_report_2025-01-29T134919.598778.json\n load_test_report_2025-01-29T135025.745361.csv\n load_test_report_2025-01-29T135025.745361.json\n load_test_report_2025-01-29T135131.347054.csv\n load_test_report_2025-01-29T135131.347054.json\n 
load_test_report_2025-01-29T135237.241605.csv\n load_test_report_2025-01-29T135237.241605.json\n load_test_report_2025-01-29T135352.526234.csv\n load_test_report_2025-01-29T135352.526234.json\n load_test_report_2025-01-29T135509.169860.csv\n load_test_report_2025-01-29T135509.169860.json\n load_test_report_20250129_135514.csv\n load_test_report_20250129_135514.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_135810/\n load_test_report_2025-01-29T135911.302460.csv\n load_test_report_2025-01-29T135911.302460.json\n load_test_report_2025-01-29T140017.766295.csv\n load_test_report_2025-01-29T140017.766295.json\n load_test_report_2025-01-29T140123.329253.csv\n load_test_report_2025-01-29T140123.329253.json\n load_test_report_2025-01-29T140229.087510.csv\n load_test_report_2025-01-29T140229.087510.json\n load_test_report_2025-01-29T140354.254251.csv\n load_test_report_2025-01-29T140354.254251.json\n load_test_report_2025-01-29T140522.596391.csv\n load_test_report_2025-01-29T140522.596391.json\n load_test_report_20250129_140527.csv\n load_test_report_20250129_140527.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_140726/\n load_test_report_2025-01-29T140828.249744.csv\n load_test_report_2025-01-29T140828.249744.json\n load_test_report_2025-01-29T140935.241087.csv\n load_test_report_2025-01-29T140935.241087.json\n load_test_report_2025-01-29T141041.737827.csv\n load_test_report_2025-01-29T141041.737827.json\n load_test_report_2025-01-29T141148.575547.csv\n load_test_report_2025-01-29T141148.575547.json\n load_test_report_2025-01-29T141257.979330.csv\n load_test_report_2025-01-29T141257.979330.json\n load_test_report_2025-01-29T141407.813467.csv\n load_test_report_2025-01-29T141407.813467.json\n load_test_report_2025-01-29T141517.031485.csv\n load_test_report_2025-01-29T141517.031485.json\n load_test_report_2025-01-29T141626.812125.csv\n load_test_report_2025-01-29T141626.812125.json\n load_test_report_2025-01-29T141738.980843.csv\n load_test_report_2025-01-29T141738.980843.json\n load_test_report_2025-01-29T141852.372524.csv\n load_test_report_2025-01-29T141852.372524.json\n load_test_report_2025-01-29T142006.313659.csv\n load_test_report_2025-01-29T142006.313659.json\n load_test_report_2025-01-29T142122.053494.csv\n load_test_report_2025-01-29T142122.053494.json\n load_test_report_20250129_142127.csv\n load_test_report_20250129_142127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_142324/\n load_test_report_2025-01-29T142426.095040.csv\n load_test_report_2025-01-29T142426.095040.json\n load_test_report_2025-01-29T142532.101781.csv\n load_test_report_2025-01-29T142532.101781.json\n load_test_report_2025-01-29T142638.130364.csv\n load_test_report_2025-01-29T142638.130364.json\n load_test_report_2025-01-29T142744.373122.csv\n load_test_report_2025-01-29T142744.373122.json\n load_test_report_2025-01-29T142851.436595.csv\n load_test_report_2025-01-29T142851.436595.json\n load_test_report_2025-01-29T142958.649875.csv\n 
load_test_report_2025-01-29T142958.649875.json\n load_test_report_2025-01-29T143105.820377.csv\n load_test_report_2025-01-29T143105.820377.json\n load_test_report_2025-01-29T143213.483254.csv\n load_test_report_2025-01-29T143213.483254.json\n load_test_report_2025-01-29T143322.075349.csv\n load_test_report_2025-01-29T143322.075349.json\n load_test_report_2025-01-29T143431.160350.csv\n load_test_report_2025-01-29T143431.160350.json\n load_test_report_2025-01-29T143540.792112.csv\n load_test_report_2025-01-29T143540.792112.json\n load_test_report_2025-01-29T143651.193158.csv\n load_test_report_2025-01-29T143651.193158.json\n load_test_report_20250129_143656.csv\n load_test_report_20250129_143656.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_144231/\n load_test_report_2025-01-29T144333.225207.csv\n load_test_report_2025-01-29T144333.225207.json\n load_test_report_2025-01-29T144441.892228.csv\n load_test_report_2025-01-29T144441.892228.json\n load_test_report_2025-01-29T144548.216391.csv\n load_test_report_2025-01-29T144548.216391.json\n load_test_report_2025-01-29T144654.207507.csv\n load_test_report_2025-01-29T144654.207507.json\n load_test_report_2025-01-29T144801.887104.csv\n load_test_report_2025-01-29T144801.887104.json\n load_test_report_2025-01-29T144907.892024.csv\n load_test_report_2025-01-29T144907.892024.json\n load_test_report_2025-01-29T145015.606306.csv\n load_test_report_2025-01-29T145015.606306.json\n load_test_report_2025-01-29T145124.318365.csv\n load_test_report_2025-01-29T145124.318365.json\n load_test_report_2025-01-29T145232.316758.csv\n load_test_report_2025-01-29T145232.316758.json\n load_test_report_2025-01-29T145338.561407.csv\n load_test_report_2025-01-29T145338.561407.json\n load_test_report_2025-01-29T145447.340833.csv\n load_test_report_2025-01-29T145447.340833.json\n load_test_report_2025-01-29T145556.603603.csv\n load_test_report_2025-01-29T145556.603603.json\n load_test_report_20250129_145601.csv\n load_test_report_20250129_145601.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_145926/\n load_test_report_2025-01-29T150027.790900.csv\n load_test_report_2025-01-29T150027.790900.json\n load_test_report_2025-01-29T150134.652497.csv\n load_test_report_2025-01-29T150134.652497.json\n load_test_report_2025-01-29T150242.312479.csv\n load_test_report_2025-01-29T150242.312479.json\n load_test_report_2025-01-29T150348.489497.csv\n load_test_report_2025-01-29T150348.489497.json\n load_test_report_2025-01-29T150454.976232.csv\n load_test_report_2025-01-29T150454.976232.json\n load_test_report_2025-01-29T150600.673114.csv\n load_test_report_2025-01-29T150600.673114.json\n load_test_report_2025-01-29T150708.380006.csv\n load_test_report_2025-01-29T150708.380006.json\n load_test_report_2025-01-29T150814.575034.csv\n load_test_report_2025-01-29T150814.575034.json\n load_test_report_2025-01-29T150923.544283.csv\n load_test_report_2025-01-29T150923.544283.json\n load_test_report_2025-01-29T151030.283486.csv\n 
load_test_report_2025-01-29T151030.283486.json\n load_test_report_2025-01-29T151138.589944.csv\n load_test_report_2025-01-29T151138.589944.json\n load_test_report_2025-01-29T151248.730621.csv\n load_test_report_2025-01-29T151248.730621.json\n load_test_report_20250129_151253.csv\n load_test_report_20250129_151253.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_160612/\n load_test_report_2025-01-29T160713.432216.csv\n load_test_report_2025-01-29T160713.432216.json\n load_test_report_2025-01-29T160819.907680.csv\n load_test_report_2025-01-29T160819.907680.json\n load_test_report_2025-01-29T160926.784918.csv\n load_test_report_2025-01-29T160926.784918.json\n load_test_report_2025-01-29T161033.828339.csv\n load_test_report_2025-01-29T161033.828339.json\n load_test_report_2025-01-29T161153.205639.csv\n load_test_report_2025-01-29T161153.205639.json\n load_test_report_2025-01-29T161315.237414.csv\n load_test_report_2025-01-29T161315.237414.json\n load_test_report_20250129_161320.csv\n load_test_report_20250129_161320.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_161925/\n load_test_report_2025-01-29T162025.734114.csv\n load_test_report_2025-01-29T162025.734114.json\n load_test_report_2025-01-29T162131.524371.csv\n load_test_report_2025-01-29T162131.524371.json\n load_test_report_2025-01-29T162237.758517.csv\n load_test_report_2025-01-29T162237.758517.json\n load_test_report_2025-01-29T162344.818406.csv\n load_test_report_2025-01-29T162344.818406.json\n load_test_report_2025-01-29T162507.384913.csv\n load_test_report_2025-01-29T162507.384913.json\n load_test_report_2025-01-29T162613.335853.csv\n load_test_report_2025-01-29T162613.335853.json\n load_test_report_20250129_162618.csv\n load_test_report_20250129_162618.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_162732/\n load_test_report_2025-01-29T162834.272459.csv\n load_test_report_2025-01-29T162834.272459.json\n load_test_report_2025-01-29T162941.672408.csv\n load_test_report_2025-01-29T162941.672408.json\n load_test_report_2025-01-29T163048.857712.csv\n load_test_report_2025-01-29T163048.857712.json\n load_test_report_2025-01-29T163157.624546.csv\n load_test_report_2025-01-29T163157.624546.json\n load_test_report_2025-01-29T163306.370415.csv\n load_test_report_2025-01-29T163306.370415.json\n load_test_report_2025-01-29T163416.065472.csv\n load_test_report_2025-01-29T163416.065472.json\n load_test_report_2025-01-29T163524.604470.csv\n load_test_report_2025-01-29T163524.604470.json\n load_test_report_2025-01-29T163632.880248.csv\n load_test_report_2025-01-29T163632.880248.json\n load_test_report_2025-01-29T163745.002002.csv\n load_test_report_2025-01-29T163745.002002.json\n load_test_report_2025-01-29T163902.036068.csv\n load_test_report_2025-01-29T163902.036068.json\n load_test_report_2025-01-29T164009.453151.csv\n load_test_report_2025-01-29T164009.453151.json\n load_test_report_2025-01-29T164122.568066.csv\n load_test_report_2025-01-29T164122.568066.json\n load_test_report_20250129_164127.csv\n 
load_test_report_20250129_164127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_164620/\n load_test_report_2025-01-29T164721.700661.csv\n load_test_report_2025-01-29T164721.700661.json\n load_test_report_2025-01-29T164827.520353.csv\n load_test_report_2025-01-29T164827.520353.json\n load_test_report_2025-01-29T164933.310367.csv\n load_test_report_2025-01-29T164933.310367.json\n load_test_report_2025-01-29T165039.642351.csv\n load_test_report_2025-01-29T165039.642351.json\n load_test_report_2025-01-29T165154.098239.csv\n load_test_report_2025-01-29T165154.098239.json\n load_test_report_2025-01-29T165308.831481.csv\n load_test_report_2025-01-29T165308.831481.json\n load_test_report_20250129_165313.csv\n load_test_report_20250129_165313.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_165758/\n load_test_report_2025-01-29T165859.461686.csv\n load_test_report_2025-01-29T165859.461686.json\n load_test_report_2025-01-29T170005.472004.csv\n load_test_report_2025-01-29T170005.472004.json\n load_test_report_2025-01-29T170111.422122.csv\n load_test_report_2025-01-29T170111.422122.json\n load_test_report_2025-01-29T170217.557618.csv\n load_test_report_2025-01-29T170217.557618.json\n load_test_report_2025-01-29T170330.493971.csv\n load_test_report_2025-01-29T170330.493971.json\n load_test_report_2025-01-29T170447.558129.csv\n load_test_report_2025-01-29T170447.558129.json\n load_test_report_20250129_170452.csv\n load_test_report_20250129_170452.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_170950/\n load_test_report_2025-01-29T171051.361008.csv\n load_test_report_2025-01-29T171051.361008.json\n load_test_report_2025-01-29T171157.323565.csv\n load_test_report_2025-01-29T171157.323565.json\n load_test_report_2025-01-29T171303.299586.csv\n load_test_report_2025-01-29T171303.299586.json\n load_test_report_2025-01-29T171409.108765.csv\n load_test_report_2025-01-29T171409.108765.json\n load_test_report_2025-01-29T171514.861147.csv\n load_test_report_2025-01-29T171514.861147.json\n load_test_report_2025-01-29T171620.615624.csv\n load_test_report_2025-01-29T171620.615624.json\n load_test_report_2025-01-29T171726.893447.csv\n load_test_report_2025-01-29T171726.893447.json\n load_test_report_2025-01-29T171833.044767.csv\n load_test_report_2025-01-29T171833.044767.json\n load_test_report_2025-01-29T171939.151837.csv\n load_test_report_2025-01-29T171939.151837.json\n load_test_report_2025-01-29T172045.358719.csv\n load_test_report_2025-01-29T172045.358719.json\n load_test_report_2025-01-29T172151.647824.csv\n load_test_report_2025-01-29T172151.647824.json\n load_test_report_2025-01-29T172257.931381.csv\n load_test_report_2025-01-29T172257.931381.json\n load_test_report_2025-01-29T172404.993732.csv\n load_test_report_2025-01-29T172404.993732.json\n load_test_report_2025-01-29T172512.469972.csv\n load_test_report_2025-01-29T172512.469972.json\n load_test_report_2025-01-29T172619.912159.csv\n load_test_report_2025-01-29T172619.912159.json\n 
load_test_report_2025-01-29T172727.520335.csv\n load_test_report_2025-01-29T172727.520335.json\n load_test_report_2025-01-29T172836.287202.csv\n load_test_report_2025-01-29T172836.287202.json\n load_test_report_2025-01-29T172945.243054.csv\n load_test_report_2025-01-29T172945.243054.json\n load_test_report_2025-01-29T173054.878245.csv\n load_test_report_2025-01-29T173054.878245.json\n load_test_report_2025-01-29T173205.270695.csv\n load_test_report_2025-01-29T173205.270695.json\n load_test_report_2025-01-29T173319.135777.csv\n load_test_report_2025-01-29T173319.135777.json\n load_test_report_2025-01-29T173434.082094.csv\n load_test_report_2025-01-29T173434.082094.json\n load_test_report_2025-01-29T173550.513858.csv\n load_test_report_2025-01-29T173550.513858.json\n load_test_report_2025-01-29T173708.906195.csv\n load_test_report_2025-01-29T173708.906195.json\n load_test_report_20250129_173713.csv\n load_test_report_20250129_173713.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_174215/\n load_test_report_2025-01-29T174316.520550.csv\n load_test_report_2025-01-29T174316.520550.json\n load_test_report_2025-01-29T174422.384594.csv\n load_test_report_2025-01-29T174422.384594.json\n load_test_report_2025-01-29T174528.291764.csv\n load_test_report_2025-01-29T174528.291764.json\n load_test_report_2025-01-29T174633.925509.csv\n load_test_report_2025-01-29T174633.925509.json\n load_test_report_2025-01-29T174740.096886.csv\n load_test_report_2025-01-29T174740.096886.json\n load_test_report_2025-01-29T174845.697959.csv\n load_test_report_2025-01-29T174845.697959.json\n load_test_report_2025-01-29T174952.084484.csv\n load_test_report_2025-01-29T174952.084484.json\n load_test_report_2025-01-29T175058.845237.csv\n load_test_report_2025-01-29T175058.845237.json\n load_test_report_2025-01-29T175205.494738.csv\n load_test_report_2025-01-29T175205.494738.json\n load_test_report_2025-01-29T175312.831611.csv\n load_test_report_2025-01-29T175312.831611.json\n load_test_report_2025-01-29T175419.902976.csv\n load_test_report_2025-01-29T175419.902976.json\n load_test_report_2025-01-29T175527.241889.csv\n load_test_report_2025-01-29T175527.241889.json\n load_test_report_2025-01-29T175635.835204.csv\n load_test_report_2025-01-29T175635.835204.json\n load_test_report_2025-01-29T175744.448069.csv\n load_test_report_2025-01-29T175744.448069.json\n load_test_report_2025-01-29T175853.905293.csv\n load_test_report_2025-01-29T175853.905293.json\n load_test_report_2025-01-29T180003.565666.csv\n load_test_report_2025-01-29T180003.565666.json\n load_test_report_2025-01-29T180115.557518.csv\n load_test_report_2025-01-29T180115.557518.json\n load_test_report_2025-01-29T180228.466492.csv\n load_test_report_2025-01-29T180228.466492.json\n load_test_report_2025-01-29T180342.419821.csv\n load_test_report_2025-01-29T180342.419821.json\n load_test_report_2025-01-29T180457.796778.csv\n load_test_report_2025-01-29T180457.796778.json\n 
load_test_report_2025-01-29T180620.304565.csv\n load_test_report_2025-01-29T180620.304565.json\n load_test_report_2025-01-29T180746.057385.csv\n load_test_report_2025-01-29T180746.057385.json\n load_test_report_2025-01-29T180914.893498.csv\n load_test_report_2025-01-29T180914.893498.json\n load_test_report_2025-01-29T181046.064760.csv\n load_test_report_2025-01-29T181046.064760.json\n load_test_report_20250129_181051.csv\n load_test_report_20250129_181051.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n starcoder2_run_20250129_123907.zip\n starcoder_run_20250128_20.zip\n starcoder_run_20250129_131828.zip\n test_single_request.py\n visualize_results.py\n temp_file_renamed.txt\n test_data.txt", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nNow, let me examine the README.md file to understand the project's purpose and structure:", + ftm_tool_calls: [ + { + id: "toolu_01DzbsASaCkQXY2fmUYZjXbj", + function: { + arguments: '{"paths": "README.md", "skeleton": false}', + name: "cat", }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: "/home/fupfv/git/benchmark1_0701/docs/README.md", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - '# LLM Load Testing Tool Documentation\n\nWelcome to the comprehensive documentation for the LLM Load Testing Tool. This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\n\n## Table of Contents\n\n1. [Introduction](#introduction)\n2. [Getting Started](#getting-started)\n - [Installation](#installation)\n - [Quick Start](#quick-start)\n3. [Core Components](#core-components)\n4. [Running Tests](#running-tests)\n - [Basic Usage](#basic-usage)\n - [Configuration Options](#configuration-options)\n - [Test Parameters](#test-parameters)\n5. [Understanding Results](#understanding-results)\n - [Report Files](#report-files)\n - [Metrics Explained](#metrics-explained)\n6. [Visualization Tools](#visualization-tools)\n - [Single Run Visualization](#single-run-visualization)\n - [Run Comparison](#run-comparison)\n - [Grafana Dashboard](#grafana-dashboard)\n7. [Advanced Usage](#advanced-usage)\n - [Custom Prompts](#custom-prompts)\n - [Provider-Specific Options](#provider-specific-options)\n - [Scaling Tests](#scaling-tests)\n8. [Troubleshooting](#troubleshooting)\n9. [API Reference](#api-reference)\n10. [Contributing](#contributing)\n\n## Introduction\n\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. 
It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\n\nKey features include:\n- Testing with different concurrency levels and output token counts\n- Comprehensive metrics collection (latency, throughput, token generation speed)\n- Detailed visualization of test results\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\n- Comparison of different test runs and models\n\n## Getting Started\n\n### Installation\n\n1. Clone the repository:\n```bash\ngit clone \ncd llm-load-test\n```\n\n2. Install dependencies:\n```bash\npip install -r requirements.txt\n```\n\n3. Make scripts executable (optional):\n```bash\nbash make_scripts_executable.sh\n```\n\n### Quick Start\n\nRun a basic load test with default parameters:\n\n```bash\npython src/llm_load_test_runner.py\n```\n\nThis will run tests with the default configuration:\n- Users: 1, 2, 50\n- Output tokens: 15, 30\n- Run time: 60s per test\n- Prompt tokens: 4046\n- Temperature: 1.0\n\n## Core Components\n\nThe tool consists of several key components:\n\n1. **llm_load_test_runner.py**: The main script that orchestrates the load tests.\n2. **load_test.py**: The Locust implementation that handles the actual request generation.\n3. **llm_test_logger.py**: Handles logging of test results.\n4. **visualize_results.py**: Generates visualizations from test results.\n5. **compare_runs.py**: Compares results from different test runs.\n6. **dashboard_generator.py**: Creates Grafana dashboard configurations.\n\n## Running Tests\n\n### Basic Usage\n\nThe simplest way to run a test is:\n\n```bash\npython src/llm_load_test_runner.py\n```\n\n### Configuration Options\n\nYou can customize the test by modifying the `base_config` dictionary in `llm_load_test_runner.py`:\n\n```python\nbase_config = {\n "host": "https://your-llm-server.com/",\n "provider": "openai",\n "model": "your-model-name",\n "api-key": "your-api-key",\n "logprobs": 5,\n "run-time": "60s",\n "temperature": 1.0,\n}\n```\n\n### Test Parameters\n\nThe main test parameters you can adjust include:\n\n- **concurrent_users**: List of user counts to test (e.g., `[1, 2, 5, 10, 20, 50]`)\n- **max_tokens**: List of output token counts to test (e.g., `[1, 15, 30, 50]`)\n- **run-time**: Duration of each test (e.g., `"60s"`)\n- **prompt-tokens**: Number of tokens in the prompt (default: 4046)\n- **temperature**: Temperature parameter for generation (default: 1.0)\n\nExample of customizing test parameters:\n\n```python\nconcurrent_users = [1, 5, 10]\nmax_tokens = [10, 50, 100]\nbase_config["run-time"] = "120s"\n```\n\n## Understanding Results\n\n### Report Files\n\nAfter running tests, results are saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\n\n1. **Individual test results**: `results_test_u{users}_o{tokens}.csv`\n2. 
**Summary reports**:\n - `load_test_report_YYYYMMDD_HHMMSS.csv`\n - `load_test_report_YYYYMMDD_HHMMSS.json`\n\n### Metrics Explained\n\nThe test collects and reports the following metrics:\n\n- **Total Latency**: Total time from request to complete response (ms)\n- **Time To First Token**: Time until the first token is received (ms)\n- **Latency Per Token**: Average time to generate each token (ms)\n- **Num Tokens**: Number of tokens generated\n- **Num Requests**: Total number of requests processed\n- **Qps**: Queries per second (throughput)\n- **Prompt Tokens**: Number of tokens in the prompt\n- **Generation Tokens**: Number of tokens requested to generate\n\n## Visualization Tools\n\nThe project includes several visualization tools to help analyze test results.\n\n### Single Run Visualization\n\nGenerate visualizations for a single test run:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\nThis generates the following visualizations:\n- Latency by concurrency level\n- Throughput (QPS) by concurrency level\n- Latency by output token count\n- Heatmap of latency by concurrency and output tokens\n- Latency distribution\n- Token generation speed\n- Summary report\n\n### Run Comparison\n\nCompare results across multiple test runs:\n\n```bash\npython src/compare_runs.py --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\n```\n\nThis generates comparison visualizations showing:\n- Latency metrics comparison across runs\n- Throughput comparison across runs\n- Performance by concurrency level across runs\n- Performance by output token count across runs\n\n### Grafana Dashboard\n\nCreate a Grafana dashboard configuration for real-time monitoring:\n\n```bash\npython src/dashboard_generator.py --output-file grafana-dashboard.json\n```\n\nTo use the dashboard:\n1. Open Grafana in your web browser\n2. Navigate to Dashboards > Import\n3. Upload the generated JSON file or paste its contents\n4. Configure the data source if prompted\n5. Click Import\n\n## Advanced Usage\n\n### Custom Prompts\n\nYou can customize the prompt used for testing by modifying the `prompt` variable in `load_test.py`:\n\n```python\nprompt = """Your custom prompt text here"""\n```\n\nFor more complex scenarios, you can use a file with multiple prompts:\n\n```bash\npython src/llm_load_test_runner.py --prompt-text @prompts.jsonl\n```\n\n### Provider-Specific Options\n\nThe tool supports various LLM providers, each with specific configuration options:\n\n- **OpenAI/Compatible APIs**: Uses the standard OpenAI API format\n- **vLLM**: Optimized for vLLM servers with specific parameters\n- **Fireworks**: Includes Fireworks-specific optimizations\n- **Triton**: Support for Triton Inference Server\n\nProvider-specific options can be set in the `format_payload` method of each provider class in `load_test.py`.\n\n### Scaling Tests\n\nFor large-scale testing, consider:\n\n1. **Increasing run time**: Set longer test durations for more stable results\n ```python\n base_config["run-time"] = "300s" # 5 minutes per test\n ```\n\n2. **Testing with more concurrent users**: Add higher concurrency levels\n ```python\n concurrent_users = [1, 10, 50, 100, 200]\n ```\n\n3. **Using QPS mode**: Instead of fixed users, target a specific query rate\n ```bash\n python src/load_test.py --qps 10 --users 100\n ```\n\n## Troubleshooting\n\nCommon issues and solutions:\n\n1. **Connection errors**: Check your API key and server URL\n2. **Timeout errors**: Increase timeout settings or reduce concurrency\n3. 
**Inconsistent results**: Increase test duration for more stable metrics\n4. **Missing data in visualizations**: Ensure CSV files contain all expected columns\n5. **Memory errors**: For large datasets, consider filtering data or using a machine with more memory\n\n## API Reference\n\n### LLMLoadTest Class\n\nThe main class that orchestrates the load tests.\n\n**Methods**:\n- `__init__(base_config, results_dir)`: Initialize with configuration and results directory\n- `run_test(test_name, users, output_tokens)`: Run a single test with specified parameters\n- `write_test_report(...)`: Write test results to CSV and JSON files\n- `generate_report()`: Generate a summary report of all tests\n\n### ResultsVisualizer Class\n\nGenerates visualizations from test results.\n\n**Methods**:\n- `__init__(results_dir, output_dir)`: Initialize with results and output directories\n- `generate_all_visualizations()`: Generate all available visualizations\n- Various plotting methods for specific visualizations\n\n## Contributing\n\nContributions to the LLM Load Testing Tool are welcome! Here\'s how you can contribute:\n\n1. **Bug reports**: Open an issue describing the bug and steps to reproduce\n2. **Feature requests**: Open an issue describing the desired feature\n3. **Code contributions**: Fork the repository, make changes, and submit a pull request\n4. **Documentation improvements**: Submit pull requests with documentation updates\n\nPlease follow the existing code style and include tests for new features.\n', - file_name_rename: null, - application_details: "", + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01DzbsASaCkQXY2fmUYZjXbj", + ftm_content: + 'Problems:\nThe path "README.md" is ambiguous. It could be interpreted as:\n/home/fupfv/git/benchmark1_0701/README.md\n/home/fupfv/git/benchmark1_0701/llm_load_test/README.md\n', + }, + { + ftm_role: "assistant", + ftm_content: "\n\nLet me check both README files:", + ftm_tool_calls: [ + { + id: "toolu_01LUEfJUST74ghMbHUmuByr5", + function: { + arguments: + '{"paths": "/home/fupfv/git/benchmark1_0701/README.md", "skeleton": false}', + name: "cat", }, - ], - tool_call_id: "toolu_011nt32gEpSJZet4jGzrVXBt", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nNow, let's create a detailed guide for the visualization tools:", - ftm_tool_calls: [ - { - id: "toolu_01LCqsTPhkZq3mQ7eEz1SAMR", - function: { - arguments: - '{"path": "/home/fupfv/git/benchmark1_0701/docs/VISUALIZATION_GUIDE.md", "content": "# LLM Load Testing Visualization Guide\\n\\nThis guide provides detailed information about the visualization tools available in the LLM Load Testing Tool and how to use them effectively to analyze your benchmark results.\\n\\n## Table of Contents\\n\\n1. [Overview](#overview)\\n2. [Single Run Visualization](#single-run-visualization)\\n - [Usage](#usage)\\n - [Generated Visualizations](#generated-visualizations)\\n - [Interpreting Results](#interpreting-results)\\n3. [Run Comparison](#run-comparison)\\n - [Usage](#usage-1)\\n - [Comparison Metrics](#comparison-metrics)\\n - [Identifying Performance Changes](#identifying-performance-changes)\\n4. [Grafana Dashboard](#grafana-dashboard)\\n - [Setup](#setup)\\n - [Dashboard Panels](#dashboard-panels)\\n - [Real-time Monitoring](#real-time-monitoring)\\n5. [Custom Visualizations](#custom-visualizations)\\n6. [Best Practices](#best-practices)\\n7. 
[Troubleshooting](#troubleshooting)\\n\\n## Overview\\n\\nThe LLM Load Testing Tool includes three main visualization components:\\n\\n1. **visualize_results.py**: Generates comprehensive visualizations for a single test run\\n2. **compare_runs.py**: Compares results across multiple test runs\\n3. **dashboard_generator.py**: Creates Grafana dashboard configurations for real-time monitoring\\n\\nThese tools help you understand the performance characteristics of your LLM inference server under different load conditions and identify potential bottlenecks or optimization opportunities.\\n\\n## Single Run Visualization\\n\\nThe `visualize_results.py` script analyzes the results of a single test run and generates various charts and visualizations.\\n\\n### Usage\\n\\nBasic usage:\\n\\n```bash\\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\\n```\\n\\nOptions:\\n- `--results-dir`: Directory containing test results (default: results)\\n- `--output-dir`: Directory to save visualizations (default: results_dir/visualizations)\\n- `--run`: Specific run directory to analyze (default: analyze all runs)\\n\\n### Generated Visualizations\\n\\nThe script generates the following visualizations:\\n\\n#### 1. Latency by Concurrency Level\\n\\n![Latency by Concurrency](example_images/latency_by_concurrency.png)\\n\\nThis chart shows how different latency metrics (Total Latency, Time To First Token, and Latency Per Token) change as the number of concurrent users increases. It helps identify how your server\'s performance scales with load.\\n\\n#### 2. Throughput by Concurrency Level\\n\\n![Throughput by Concurrency](example_images/throughput_by_concurrency.png)\\n\\nThis bar chart displays the Queries Per Second (QPS) achieved at different concurrency levels. It helps determine the optimal concurrency level for maximum throughput.\\n\\n#### 3. Latency by Output Token Count\\n\\n![Latency by Output Tokens](example_images/latency_by_output_tokens.png)\\n\\nThis chart shows how latency metrics change with different output token counts. It helps understand the relationship between response size and latency.\\n\\n#### 4. Heatmap of Latency\\n\\n![Latency Heatmap](example_images/heatmap_latency.png)\\n\\nThis heatmap visualizes latency across different combinations of concurrency levels and output token counts. Darker colors typically indicate higher latency.\\n\\n#### 5. Latency Distribution\\n\\n![Latency Distribution](example_images/latency_distribution.png)\\n\\nThis histogram shows the distribution of total latency values, including mean and median lines. It helps identify outliers and understand the variability in response times.\\n\\n#### 6. Token Generation Speed\\n\\n![Token Generation Speed](example_images/token_generation_speed.png)\\n\\nThis chart shows the token generation speed (tokens per second) at different concurrency levels. It helps understand how token generation throughput scales with load.\\n\\n#### 7. Summary Report\\n\\nA markdown file containing key statistics and findings from the analysis, including:\\n- Total tests analyzed\\n- Average and median latency\\n- Average throughput\\n- Maximum concurrency tested\\n\\n### Interpreting Results\\n\\nWhen analyzing the visualizations, look for:\\n\\n1. **Scaling patterns**: How does latency increase with concurrency? Is there a point where throughput plateaus or decreases?\\n\\n2. **Bottlenecks**: Are there specific concurrency levels or token counts where performance degrades significantly?\\n\\n3. 
**Variability**: Is there high variance in latency? This might indicate inconsistent performance.\\n\\n4. **Token efficiency**: How does the token generation speed change with load? This indicates the model\'s efficiency under pressure.\\n\\n## Run Comparison\\n\\nThe `compare_runs.py` script compares results from different test runs to identify performance differences, regressions, or improvements.\\n\\n### Usage\\n\\nBasic usage:\\n\\n```bash\\npython src/compare_runs.py --base-dir results --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\\n```\\n\\nOptions:\\n- `--base-dir`: Base directory containing run directories (default: results)\\n- `--runs`: Specific run directories to compare (default: all runs)\\n- `--output-dir`: Directory to save comparison visualizations\\n\\n### Comparison Metrics\\n\\nThe script generates comparison visualizations for:\\n\\n#### 1. Latency Comparison\\n\\n![Latency Comparison](example_images/latency_comparison.png)\\n\\nThis chart compares total latency across different runs, helping identify performance improvements or regressions.\\n\\n#### 2. Throughput Comparison\\n\\n![Throughput Comparison](example_images/throughput_comparison.png)\\n\\nThis chart compares QPS across different runs, showing how throughput has changed.\\n\\n#### 3. Performance by Concurrency Level\\n\\n![Performance by Concurrency](example_images/performance_by_concurrency.png)\\n\\nThis chart shows how performance at different concurrency levels has changed across runs.\\n\\n#### 4. Performance by Output Token Count\\n\\n![Performance by Tokens](example_images/performance_by_tokens.png)\\n\\nThis chart shows how performance with different output token counts has changed across runs.\\n\\n#### 5. Summary Table\\n\\nA table showing key metrics for each run and the percentage change between runs.\\n\\n### Identifying Performance Changes\\n\\nWhen comparing runs, look for:\\n\\n1. **Consistent improvements**: Are latency reductions consistent across all concurrency levels and token counts?\\n\\n2. **Regression points**: Are there specific scenarios where performance has degraded?\\n\\n3. **Scaling changes**: Has the scaling behavior changed? For example, does the new version handle high concurrency better?\\n\\n4. **Throughput improvements**: Has the maximum achievable QPS increased?\\n\\n## Grafana Dashboard\\n\\nThe `dashboard_generator.py` script creates a Grafana dashboard configuration for real-time monitoring of load tests.\\n\\n### Setup\\n\\n1. Generate the dashboard configuration:\\n\\n```bash\\npython src/dashboard_generator.py --output-file grafana-dashboard.json\\n```\\n\\n2. Import into Grafana:\\n - Open Grafana in your web browser\\n - Navigate to Dashboards > Import\\n - Upload the generated JSON file or paste its contents\\n - Configure the data source if prompted\\n - Click Import\\n\\n### Dashboard Panels\\n\\nThe generated dashboard includes panels for:\\n\\n#### 1. Latency Metrics\\n\\nReal-time graphs of:\\n- Total Latency\\n- Time To First Token\\n- Latency Per Token\\n\\n#### 2. Throughput Metrics\\n\\nReal-time graphs of:\\n- Queries Per Second (QPS)\\n- Requests per minute\\n\\n#### 3. Token Metrics\\n\\nReal-time graphs of:\\n- Tokens generated\\n- Token generation speed (tokens/second)\\n\\n#### 4. Key Statistics\\n\\nStat panels showing:\\n- Average latency\\n- P95 latency\\n- Maximum QPS\\n- Success rate\\n\\n### Real-time Monitoring\\n\\nThe Grafana dashboard is particularly useful for:\\n\\n1. 
**Long-running tests**: Monitor performance over extended periods\\n\\n2. **Stability testing**: Identify performance degradation over time\\n\\n3. **Threshold alerts**: Set up alerts when metrics exceed thresholds\\n\\n4. **Sharing results**: Provide stakeholders with a live view of performance\\n\\n## Custom Visualizations\\n\\nYou can extend the visualization capabilities by modifying the existing scripts or creating new ones.\\n\\nTo add a new visualization to `visualize_results.py`:\\n\\n1. Add a new plotting method to the `ResultsVisualizer` class:\\n\\n```python\\ndef plot_custom_metric(self):\\n \\"\\"\\"Plot a custom metric visualization.\\"\\"\\"\\n plt.figure(figsize=(12, 8))\\n \\n # Your plotting code here\\n \\n plt.title(\'Custom Metric Visualization\')\\n plt.xlabel(\'X Label\')\\n plt.ylabel(\'Y Label\')\\n plt.grid(True)\\n plt.tight_layout()\\n plt.savefig(self.output_dir / \'custom_metric.png\')\\n plt.close()\\n```\\n\\n2. Add your new method to the `generate_all_visualizations` method:\\n\\n```python\\ndef generate_all_visualizations(self):\\n # Existing visualizations\\n self.plot_latency_by_concurrency()\\n # ...\\n \\n # Add your custom visualization\\n self.plot_custom_metric()\\n \\n self.generate_summary_report()\\n```\\n\\n## Best Practices\\n\\nFor effective visualization and analysis:\\n\\n1. **Consistent test parameters**: When comparing runs, ensure that the test parameters (prompt tokens, output tokens, etc.) are consistent.\\n\\n2. **Baseline comparison**: Always include a baseline run when comparing performance improvements.\\n\\n3. **Isolate variables**: When testing for specific improvements, change only one variable at a time.\\n\\n4. **Sufficient sample size**: Run tests long enough to collect a statistically significant number of samples.\\n\\n5. **Annotate changes**: Document what changed between different test runs for easier interpretation.\\n\\n6. **Regular benchmarking**: Establish a regular benchmarking cadence to track performance over time.\\n\\n7. **Export raw data**: Save raw data for custom analysis in other tools if needed.\\n\\n## Troubleshooting\\n\\nCommon visualization issues and solutions:\\n\\n1. **Missing data**: If visualizations show missing data, check that the CSV files contain the expected columns.\\n\\n2. **Pandas errors**: Ensure you have the latest version of pandas installed (`pip install --upgrade pandas`).\\n\\n3. **Matplotlib issues**: If plots don\'t render correctly, try updating matplotlib (`pip install --upgrade matplotlib`).\\n\\n4. **File not found**: Verify that the specified directories exist and contain the expected data files.\\n\\n5. **Memory errors**: For very large datasets, consider filtering the data or using a machine with more memory.\\n\\n6. **Inconsistent scales**: If comparing runs with vastly different performance characteristics, consider using log scales or normalizing the data.\\n\\n7. 
**Empty visualizations**: If a visualization is empty, check if the required data is present in the results files."}', - name: "create_textdoc", - }, - type: "function", - index: 0, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01LUEfJUST74ghMbHUmuByr5", + ftm_content: "Paths found:\n/home/fupfv/git/benchmark1_0701/README.md\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "benchmark1_0701/README.md", + file_content: + "# LLM Load Testing Tool\n\nA tool for load testing LLM inference servers using Locust, with comprehensive visualization capabilities.\n\n## Setup\n\n1. Install dependencies:\n```bash\npip install -r requirements.txt\n```\n\n2. Run the test:\n```bash\npython src/llm_load_test_runner.py\n```\n\n3. Make visualization scripts executable (optional):\n```bash\nbash make_scripts_executable.sh\n```\n\n## Configuration\n\nThe test runs with these default parameters:\n- Users: 1, 2, 50\n- Output tokens: 15, 30\n- Run time: 60s per test\n- Prompt tokens: 4046\n- Temperature: 1.0\n\n## Reports\n\nReports are generated in the `results/run_YYYYMMDD_HHMMSS/` directory:\n- Individual test results: `results_test_*.csv`\n- Summary reports: \n - `load_test_report_YYYYMMDD_HHMMSS.csv`\n - `load_test_report_YYYYMMDD_HHMMSS.json`\n\n## Example Report Format\n\nThe CSV report includes:\n- Response metrics (total latency, first token latency)\n- Number of tokens generated\n- Characters generated\n- Requests per second\n- Success/failure rates\n\n## Visualization Tools\n\nThe project includes several visualization tools to help analyze test results:\n\n### 1. Single Run Visualization\n\nGenerate comprehensive visualizations for a single test run:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\n### 2. Run Comparison\n\nCompare results across multiple test runs:\n\n```bash\npython src/compare_runs.py --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\n```\n\n### 3. Grafana Dashboard Generator\n\nCreate a Grafana dashboard configuration for real-time monitoring:\n\n```bash\npython src/dashboard_generator.py --output-file grafana-dashboard.json\n```\n\nFor detailed information about the visualization tools, see [VISUALIZATION.md](VISUALIZATION.md).\n", + line1: 1, + line2: 75, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "Let me check the VISUALIZATION.md file and the main source files to understand the project better:", + ftm_tool_calls: [ + { + id: "toolu_019uJg1SQ5evTU9bov6iS9hU", + function: { + arguments: + '{"paths": "/home/fupfv/git/benchmark1_0701/VISUALIZATION.md", "skeleton": false}', + name: "cat", }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: - "/home/fupfv/git/benchmark1_0701/docs/VISUALIZATION_GUIDE.md", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - "# LLM Load Testing Visualization Guide\n\nThis guide provides detailed information about the visualization tools available in the LLM Load Testing Tool and how to use them effectively to analyze your benchmark results.\n\n## Table of Contents\n\n1. [Overview](#overview)\n2. [Single Run Visualization](#single-run-visualization)\n - [Usage](#usage)\n - [Generated Visualizations](#generated-visualizations)\n - [Interpreting Results](#interpreting-results)\n3. 
[Run Comparison](#run-comparison)\n - [Usage](#usage-1)\n - [Comparison Metrics](#comparison-metrics)\n - [Identifying Performance Changes](#identifying-performance-changes)\n4. [Grafana Dashboard](#grafana-dashboard)\n - [Setup](#setup)\n - [Dashboard Panels](#dashboard-panels)\n - [Real-time Monitoring](#real-time-monitoring)\n5. [Custom Visualizations](#custom-visualizations)\n6. [Best Practices](#best-practices)\n7. [Troubleshooting](#troubleshooting)\n\n## Overview\n\nThe LLM Load Testing Tool includes three main visualization components:\n\n1. **visualize_results.py**: Generates comprehensive visualizations for a single test run\n2. **compare_runs.py**: Compares results across multiple test runs\n3. **dashboard_generator.py**: Creates Grafana dashboard configurations for real-time monitoring\n\nThese tools help you understand the performance characteristics of your LLM inference server under different load conditions and identify potential bottlenecks or optimization opportunities.\n\n## Single Run Visualization\n\nThe `visualize_results.py` script analyzes the results of a single test run and generates various charts and visualizations.\n\n### Usage\n\nBasic usage:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\nOptions:\n- `--results-dir`: Directory containing test results (default: results)\n- `--output-dir`: Directory to save visualizations (default: results_dir/visualizations)\n- `--run`: Specific run directory to analyze (default: analyze all runs)\n\n### Generated Visualizations\n\nThe script generates the following visualizations:\n\n#### 1. Latency by Concurrency Level\n\n![Latency by Concurrency](example_images/latency_by_concurrency.png)\n\nThis chart shows how different latency metrics (Total Latency, Time To First Token, and Latency Per Token) change as the number of concurrent users increases. It helps identify how your server's performance scales with load.\n\n#### 2. Throughput by Concurrency Level\n\n![Throughput by Concurrency](example_images/throughput_by_concurrency.png)\n\nThis bar chart displays the Queries Per Second (QPS) achieved at different concurrency levels. It helps determine the optimal concurrency level for maximum throughput.\n\n#### 3. Latency by Output Token Count\n\n![Latency by Output Tokens](example_images/latency_by_output_tokens.png)\n\nThis chart shows how latency metrics change with different output token counts. It helps understand the relationship between response size and latency.\n\n#### 4. Heatmap of Latency\n\n![Latency Heatmap](example_images/heatmap_latency.png)\n\nThis heatmap visualizes latency across different combinations of concurrency levels and output token counts. Darker colors typically indicate higher latency.\n\n#### 5. Latency Distribution\n\n![Latency Distribution](example_images/latency_distribution.png)\n\nThis histogram shows the distribution of total latency values, including mean and median lines. It helps identify outliers and understand the variability in response times.\n\n#### 6. Token Generation Speed\n\n![Token Generation Speed](example_images/token_generation_speed.png)\n\nThis chart shows the token generation speed (tokens per second) at different concurrency levels. It helps understand how token generation throughput scales with load.\n\n#### 7. 
Summary Report\n\nA markdown file containing key statistics and findings from the analysis, including:\n- Total tests analyzed\n- Average and median latency\n- Average throughput\n- Maximum concurrency tested\n\n### Interpreting Results\n\nWhen analyzing the visualizations, look for:\n\n1. **Scaling patterns**: How does latency increase with concurrency? Is there a point where throughput plateaus or decreases?\n\n2. **Bottlenecks**: Are there specific concurrency levels or token counts where performance degrades significantly?\n\n3. **Variability**: Is there high variance in latency? This might indicate inconsistent performance.\n\n4. **Token efficiency**: How does the token generation speed change with load? This indicates the model's efficiency under pressure.\n\n## Run Comparison\n\nThe `compare_runs.py` script compares results from different test runs to identify performance differences, regressions, or improvements.\n\n### Usage\n\nBasic usage:\n\n```bash\npython src/compare_runs.py --base-dir results --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\n```\n\nOptions:\n- `--base-dir`: Base directory containing run directories (default: results)\n- `--runs`: Specific run directories to compare (default: all runs)\n- `--output-dir`: Directory to save comparison visualizations\n\n### Comparison Metrics\n\nThe script generates comparison visualizations for:\n\n#### 1. Latency Comparison\n\n![Latency Comparison](example_images/latency_comparison.png)\n\nThis chart compares total latency across different runs, helping identify performance improvements or regressions.\n\n#### 2. Throughput Comparison\n\n![Throughput Comparison](example_images/throughput_comparison.png)\n\nThis chart compares QPS across different runs, showing how throughput has changed.\n\n#### 3. Performance by Concurrency Level\n\n![Performance by Concurrency](example_images/performance_by_concurrency.png)\n\nThis chart shows how performance at different concurrency levels has changed across runs.\n\n#### 4. Performance by Output Token Count\n\n![Performance by Tokens](example_images/performance_by_tokens.png)\n\nThis chart shows how performance with different output token counts has changed across runs.\n\n#### 5. Summary Table\n\nA table showing key metrics for each run and the percentage change between runs.\n\n### Identifying Performance Changes\n\nWhen comparing runs, look for:\n\n1. **Consistent improvements**: Are latency reductions consistent across all concurrency levels and token counts?\n\n2. **Regression points**: Are there specific scenarios where performance has degraded?\n\n3. **Scaling changes**: Has the scaling behavior changed? For example, does the new version handle high concurrency better?\n\n4. **Throughput improvements**: Has the maximum achievable QPS increased?\n\n## Grafana Dashboard\n\nThe `dashboard_generator.py` script creates a Grafana dashboard configuration for real-time monitoring of load tests.\n\n### Setup\n\n1. Generate the dashboard configuration:\n\n```bash\npython src/dashboard_generator.py --output-file grafana-dashboard.json\n```\n\n2. Import into Grafana:\n - Open Grafana in your web browser\n - Navigate to Dashboards > Import\n - Upload the generated JSON file or paste its contents\n - Configure the data source if prompted\n - Click Import\n\n### Dashboard Panels\n\nThe generated dashboard includes panels for:\n\n#### 1. Latency Metrics\n\nReal-time graphs of:\n- Total Latency\n- Time To First Token\n- Latency Per Token\n\n#### 2. 
Throughput Metrics\n\nReal-time graphs of:\n- Queries Per Second (QPS)\n- Requests per minute\n\n#### 3. Token Metrics\n\nReal-time graphs of:\n- Tokens generated\n- Token generation speed (tokens/second)\n\n#### 4. Key Statistics\n\nStat panels showing:\n- Average latency\n- P95 latency\n- Maximum QPS\n- Success rate\n\n### Real-time Monitoring\n\nThe Grafana dashboard is particularly useful for:\n\n1. **Long-running tests**: Monitor performance over extended periods\n\n2. **Stability testing**: Identify performance degradation over time\n\n3. **Threshold alerts**: Set up alerts when metrics exceed thresholds\n\n4. **Sharing results**: Provide stakeholders with a live view of performance\n\n## Custom Visualizations\n\nYou can extend the visualization capabilities by modifying the existing scripts or creating new ones.\n\nTo add a new visualization to `visualize_results.py`:\n\n1. Add a new plotting method to the `ResultsVisualizer` class:\n\n```python\ndef plot_custom_metric(self):\n \"\"\"Plot a custom metric visualization.\"\"\"\n plt.figure(figsize=(12, 8))\n \n # Your plotting code here\n \n plt.title('Custom Metric Visualization')\n plt.xlabel('X Label')\n plt.ylabel('Y Label')\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(self.output_dir / 'custom_metric.png')\n plt.close()\n```\n\n2. Add your new method to the `generate_all_visualizations` method:\n\n```python\ndef generate_all_visualizations(self):\n # Existing visualizations\n self.plot_latency_by_concurrency()\n # ...\n \n # Add your custom visualization\n self.plot_custom_metric()\n \n self.generate_summary_report()\n```\n\n## Best Practices\n\nFor effective visualization and analysis:\n\n1. **Consistent test parameters**: When comparing runs, ensure that the test parameters (prompt tokens, output tokens, etc.) are consistent.\n\n2. **Baseline comparison**: Always include a baseline run when comparing performance improvements.\n\n3. **Isolate variables**: When testing for specific improvements, change only one variable at a time.\n\n4. **Sufficient sample size**: Run tests long enough to collect a statistically significant number of samples.\n\n5. **Annotate changes**: Document what changed between different test runs for easier interpretation.\n\n6. **Regular benchmarking**: Establish a regular benchmarking cadence to track performance over time.\n\n7. **Export raw data**: Save raw data for custom analysis in other tools if needed.\n\n## Troubleshooting\n\nCommon visualization issues and solutions:\n\n1. **Missing data**: If visualizations show missing data, check that the CSV files contain the expected columns.\n\n2. **Pandas errors**: Ensure you have the latest version of pandas installed (`pip install --upgrade pandas`).\n\n3. **Matplotlib issues**: If plots don't render correctly, try updating matplotlib (`pip install --upgrade matplotlib`).\n\n4. **File not found**: Verify that the specified directories exist and contain the expected data files.\n\n5. **Memory errors**: For very large datasets, consider filtering the data or using a machine with more memory.\n\n6. **Inconsistent scales**: If comparing runs with vastly different performance characteristics, consider using log scales or normalizing the data.\n\n7. 
**Empty visualizations**: If a visualization is empty, check if the required data is present in the results files.\n", - file_name_rename: null, - application_details: "", + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_019uJg1SQ5evTU9bov6iS9hU", + ftm_content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/VISUALIZATION.md\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "VISUALIZATION.md", + file_content: + "# LLM Benchmark Visualization Tools\n\nThis document describes the visualization tools available for analyzing the results of LLM load tests.\n\n## Available Visualization Tools\n\nThe project includes three main visualization tools:\n\n1. **visualize_results.py** - Generate visualizations for a single test run\n2. **compare_runs.py** - Compare results across multiple test runs\n3. **dashboard_generator.py** - Generate Grafana dashboard configurations for real-time monitoring\n\n## Prerequisites\n\nInstall the required dependencies:\n\n```bash\npip install pandas matplotlib seaborn numpy\n```\n\nFor Grafana dashboards, you'll need to have Grafana installed and configured.\n\n## 1. Visualize Results\n\nThe `visualize_results.py` script generates various charts and visualizations from a single test run.\n\n### Usage\n\n```bash\npython src/visualize_results.py --results-dir results/run_20250129_174215 --output-dir visualizations\n```\n\n### Parameters\n\n- `--results-dir`: Directory containing test results (default: results)\n- `--output-dir`: Directory to save visualizations (default: results_dir/visualizations)\n- `--run`: Specific run directory to analyze (default: analyze all runs)\n\n### Generated Visualizations\n\n- Latency by concurrency level\n- Throughput (QPS) by concurrency level\n- Latency by output token count\n- Heatmap of latency by concurrency and output tokens\n- Model comparison (if multiple models)\n- Run comparison (if multiple runs)\n- Latency distribution\n- Token generation speed\n- Summary report (markdown)\n\n## 2. Compare Runs\n\nThe `compare_runs.py` script compares results from different test runs to identify performance differences, regressions, or improvements.\n\n### Usage\n\n```bash\npython src/compare_runs.py --base-dir results --runs run_20250129_174215 run_20250129_170950 --output-dir comparisons\n```\n\n### Parameters\n\n- `--base-dir`: Base directory containing run directories (default: results)\n- `--runs`: Specific run directories to compare (default: all runs)\n- `--output-dir`: Directory to save comparison visualizations\n\n### Generated Comparisons\n\n- Latency metrics comparison across runs\n- Throughput comparison across runs\n- Performance by concurrency level across runs\n- Performance by output token count across runs\n- Heatmap comparisons\n- Summary table with key metrics\n- Improvement metrics (when comparing multiple runs)\n\n## 3. 
Dashboard Generator\n\nThe `dashboard_generator.py` script generates a Grafana dashboard configuration from test results for real-time monitoring.\n\n### Usage\n\n```bash\npython src/dashboard_generator.py --results-dir results --output-file grafana-dashboard.json\n```\n\n### Parameters\n\n- `--results-dir`: Directory containing test results (default: results)\n- `--output-file`: Path to save the dashboard JSON file\n\n### Dashboard Features\n\n- Real-time latency metrics\n- Throughput metrics\n- Token metrics\n- Token generation speed\n- Key statistics\n- Model comparison\n\n### Importing to Grafana\n\n1. Open Grafana in your web browser\n2. Navigate to Dashboards > Import\n3. Upload the generated JSON file or paste its contents\n4. Configure the data source if prompted\n5. Click Import\n\n## Example Workflow\n\n1. Run load tests with different configurations:\n ```bash\n python src/llm_load_test_runner.py\n ```\n\n2. Generate visualizations for a specific run:\n ```bash\n python src/visualize_results.py --results-dir results/run_20250129_174215\n ```\n\n3. Compare multiple runs:\n ```bash\n python src/compare_runs.py --runs run_20250129_174215 run_20250129_170950\n ```\n\n4. Generate a Grafana dashboard:\n ```bash\n python src/dashboard_generator.py --output-file grafana-dashboard.json\n ```\n\n## Tips for Effective Analysis\n\n1. **Consistent Test Parameters**: When comparing runs, ensure that the test parameters (prompt tokens, output tokens, etc.) are consistent.\n\n2. **Baseline Comparison**: Always include a baseline run when comparing performance improvements.\n\n3. **Isolate Variables**: When testing for specific improvements, change only one variable at a time.\n\n4. **Real-time Monitoring**: Use the Grafana dashboard for real-time monitoring during long-running tests.\n\n5. **Anomaly Detection**: Look for outliers in the latency distribution charts to identify potential issues.\n\n6. **Scaling Analysis**: Pay special attention to how performance metrics change with increasing concurrency levels.\n\n## Troubleshooting\n\n- **Missing Data**: If visualizations show missing data, check that the CSV files contain the expected columns.\n\n- **Pandas Errors**: Ensure you have the latest version of pandas installed (`pip install --upgrade pandas`).\n\n- **Matplotlib Issues**: If plots don't render correctly, try updating matplotlib (`pip install --upgrade matplotlib`).\n\n- **File Not Found**: Verify that the specified directories exist and contain the expected data files.\n\n- **Memory Errors**: For very large datasets, consider filtering the data or using a machine with more memory.\n", + line1: 1, + line2: 154, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "Now let me examine the main source files to understand how the tool works:", + ftm_tool_calls: [ + { + id: "toolu_01T48o7umn3FG5JVJWRgviUP", + function: { + arguments: + '{"paths": "/home/fupfv/git/benchmark1_0701/src/llm_load_test_runner.py", "skeleton": false}', + name: "cat", }, - ], - tool_call_id: "toolu_01LCqsTPhkZq3mQ7eEz1SAMR", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\n\n\nNow, let's create a technical guide for developers who want to extend the tool:", - ftm_tool_calls: [ - { - id: "toolu_018ik5qU9MGsUpZY4Bn5f33o", - function: { - arguments: - '{"path": "/home/fupfv/git/benchmark1_0701/docs/DEVELOPER_GUIDE.md", "content": "# LLM Load Testing Tool Developer Guide\\n\\nThis guide is intended for developers who want to extend, modify, or contribute to the LLM Load Testing Tool. 
It provides technical details about the codebase, architecture, and guidelines for development.\\n\\n## Table of Contents\\n\\n1. [Architecture Overview](#architecture-overview)\\n2. [Core Components](#core-components)\\n3. [Adding New Features](#adding-new-features)\\n - [Adding a New Provider](#adding-a-new-provider)\\n - [Adding New Metrics](#adding-new-metrics)\\n - [Adding New Visualizations](#adding-new-visualizations)\\n4. [Testing Guidelines](#testing-guidelines)\\n5. [Code Style and Conventions](#code-style-and-conventions)\\n6. [Contributing Guidelines](#contributing-guidelines)\\n\\n## Architecture Overview\\n\\nThe LLM Load Testing Tool is built with a modular architecture that separates the concerns of test execution, data collection, and visualization. The main components are:\\n\\n1. **Test Runner**: Orchestrates the execution of load tests with different parameters.\\n2. **Load Test Implementation**: Uses Locust to generate load and collect metrics.\\n3. **Results Processing**: Parses and processes the raw test results.\\n4. **Visualization Tools**: Generate charts and reports from the processed results.\\n\\nThe data flow through the system is as follows:\\n\\n```\\nTest Configuration → Test Runner → Load Test Implementation → Raw Results → Results Processing → Visualizations\\n```\\n\\n## Core Components\\n\\n### 1. llm_load_test_runner.py\\n\\nThis is the main entry point for running load tests. It:\\n- Configures test parameters\\n- Creates a results directory\\n- Runs tests with different combinations of users and output tokens\\n- Generates summary reports\\n\\nKey classes and methods:\\n- `LLMLoadTest`: Main class for orchestrating tests\\n - `run_test(test_name, users, output_tokens)`: Runs a single test\\n - `write_test_report(...)`: Writes test results to files\\n - `parse_output(output)`: Parses metrics from test output\\n - `generate_report()`: Generates a summary report\\n\\n### 2. load_test.py\\n\\nThis file contains the Locust implementation for generating load. It:\\n- Defines user behavior for load testing\\n- Implements different provider classes for various LLM APIs\\n- Collects and reports metrics\\n\\nKey classes:\\n- `LLMUser`: Locust user class that sends requests to the LLM server\\n- `BaseProvider`: Abstract base class for LLM providers\\n - `OpenAIProvider`, `VllmProvider`, etc.: Provider-specific implementations\\n- `LengthSampler`: Utility for sampling token lengths\\n- `FixedQPSPacer`: Utility for controlling request rate\\n\\n### 3. llm_test_logger.py\\n\\nHandles logging of test results and details.\\n\\n### 4. visualize_results.py\\n\\nGenerates visualizations from test results. Key components:\\n- `ResultsVisualizer`: Main class for generating visualizations\\n - Various plotting methods for different metrics\\n - `generate_all_visualizations()`: Generates all visualizations\\n\\n### 5. compare_runs.py\\n\\nCompares results from different test runs.\\n\\n### 6. dashboard_generator.py\\n\\nGenerates Grafana dashboard configurations.\\n\\n## Adding New Features\\n\\n### Adding a New Provider\\n\\nTo add support for a new LLM provider:\\n\\n1. 
Create a new provider class in `load_test.py` that inherits from `BaseProvider`:\\n\\n```python\\nclass NewProvider(BaseProvider):\\n DEFAULT_MODEL_NAME = \\"default-model-name\\" # Optional default model name\\n \\n def get_url(self):\\n \\"\\"\\"Return the API endpoint URL.\\"\\"\\"\\n return \\"/api/endpoint\\"\\n \\n def format_payload(self, prompt, max_tokens, images):\\n \\"\\"\\"Format the request payload for this provider.\\"\\"\\"\\n data = {\\n \\"model\\": self.model,\\n \\"prompt\\": prompt,\\n \\"max_tokens\\": max_tokens,\\n # Provider-specific parameters\\n \\"provider_param\\": \\"value\\"\\n }\\n return data\\n \\n def parse_output_json(self, data, prompt):\\n \\"\\"\\"Parse the response from this provider.\\"\\"\\"\\n # Extract text, token counts, etc.\\n text = data.get(\\"output\\", \\"\\")\\n tokens = data.get(\\"token_count\\", 0)\\n \\n return ChunkMetadata(\\n text=text,\\n logprob_tokens=None,\\n usage_tokens=tokens,\\n prompt_usage_tokens=None\\n )\\n```\\n\\n2. Add the provider to the `PROVIDER_CLASS_MAP` dictionary:\\n\\n```python\\nPROVIDER_CLASS_MAP = {\\n # Existing providers\\n \\"openai\\": OpenAIProvider,\\n \\"vllm\\": VllmProvider,\\n # Add your new provider\\n \\"new_provider\\": NewProvider,\\n}\\n```\\n\\n### Adding New Metrics\\n\\nTo add a new metric to track:\\n\\n1. Modify the `LLMUser.generate_text` method in `load_test.py` to collect the new metric:\\n\\n```python\\n@task\\ndef generate_text(self):\\n # Existing code...\\n \\n # Add your new metric calculation\\n new_metric_value = calculate_new_metric(response, prompt)\\n \\n # Add the custom metric\\n add_custom_metric(\\"new_metric_name\\", new_metric_value)\\n```\\n\\n2. Update the `write_test_report` method in `llm_load_test_runner.py` to include the new metric:\\n\\n```python\\ndef write_test_report(self, test_name, response_text, duration, exit_code, prompt_tokens, provider=None, model=None, generation_tokens=None, stream=None, temperature=None, logprobs=None, concurrency=None, time_to_first_token=None, latency_per_token=None, num_tokens=None, total_latency=None, num_requests=None, qps=None, new_metric=None):\\n # Add the new metric to report_data\\n report_data = {\\n # Existing fields...\\n \\"New Metric\\": new_metric,\\n }\\n \\n # Update CSV writer to include the new field\\n writer.writerow([\\"Response received\\", ..., \\"New Metric\\", ...])\\n writer.writerow([response_text, ..., new_metric, ...])\\n```\\n\\n### Adding New Visualizations\\n\\nTo add a new visualization:\\n\\n1. Add a new plotting method to the `ResultsVisualizer` class in `visualize_results.py`:\\n\\n```python\\ndef plot_new_visualization(self):\\n \\"\\"\\"Plot a new visualization.\\"\\"\\"\\n if \'required_column\' not in self.data.columns:\\n print(\\"Required column not found for new visualization\\")\\n return\\n \\n plt.figure(figsize=(12, 8))\\n \\n # Your plotting code here\\n sns.lineplot(x=\'Concurrency\', y=\'new_metric\', data=self.data)\\n \\n plt.title(\'New Metric Visualization\')\\n plt.xlabel(\'Concurrent Users\')\\n plt.ylabel(\'New Metric\')\\n plt.grid(True)\\n plt.tight_layout()\\n plt.savefig(self.output_dir / \'new_visualization.png\')\\n plt.close()\\n```\\n\\n2. 
Add your new method to the `generate_all_visualizations` method:\\n\\n```python\\ndef generate_all_visualizations(self):\\n # Existing visualizations\\n self.plot_latency_by_concurrency()\\n # ...\\n \\n # Add your new visualization\\n self.plot_new_visualization()\\n \\n self.generate_summary_report()\\n```\\n\\n## Testing Guidelines\\n\\nWhen developing new features or fixing bugs, follow these testing guidelines:\\n\\n1. **Unit Tests**: Write unit tests for new functionality, especially for data processing and parsing logic.\\n\\n2. **Integration Tests**: Test the integration between components, such as running a test and verifying that the results are correctly processed.\\n\\n3. **Manual Testing**: For visualizations and UI components, perform manual testing to ensure they render correctly.\\n\\n4. **Test with Different Providers**: Ensure that new features work with all supported providers.\\n\\n5. **Test with Different Parameters**: Verify that the tool works correctly with different combinations of users, tokens, and other parameters.\\n\\n6. **Error Handling**: Test error cases to ensure the tool gracefully handles failures.\\n\\n## Code Style and Conventions\\n\\nFollow these style guidelines when contributing to the codebase:\\n\\n1. **PEP 8**: Follow the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code.\\n\\n2. **Docstrings**: Use docstrings to document classes, methods, and functions. Follow the [Google style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.\\n\\n3. **Type Hints**: Use type hints to indicate parameter and return types.\\n\\n4. **Naming Conventions**:\\n - Classes: `CamelCase`\\n - Functions and methods: `snake_case`\\n - Variables: `snake_case`\\n - Constants: `UPPER_SNAKE_CASE`\\n\\n5. **Comments**: Add comments to explain complex logic or non-obvious behavior.\\n\\n6. **Imports**: Organize imports in the following order:\\n - Standard library imports\\n - Third-party library imports\\n - Local application imports\\n\\n## Contributing Guidelines\\n\\nTo contribute to the LLM Load Testing Tool:\\n\\n1. **Fork the Repository**: Create a fork of the repository on GitHub.\\n\\n2. **Create a Branch**: Create a branch for your feature or bug fix.\\n\\n3. **Make Changes**: Implement your changes following the code style guidelines.\\n\\n4. **Write Tests**: Add tests for your changes to ensure they work correctly.\\n\\n5. **Update Documentation**: Update the documentation to reflect your changes.\\n\\n6. **Submit a Pull Request**: Submit a pull request with your changes.\\n\\n7. **Code Review**: Address any feedback from the code review.\\n\\n8. 
**Merge**: Once approved, your changes will be merged into the main branch.\\n\\n### Commit Message Guidelines\\n\\nFollow these guidelines for commit messages:\\n\\n- Use the present tense (\\"Add feature\\" not \\"Added feature\\")\\n- Use the imperative mood (\\"Move cursor to...\\" not \\"Moves cursor to...\\")\\n- Limit the first line to 72 characters or less\\n- Reference issues and pull requests liberally after the first line\\n\\nExample:\\n```\\nAdd support for XYZ provider\\n\\nThis commit adds support for the XYZ provider, including:\\n- Implementation of XYZProvider class\\n- Unit tests for XYZ provider\\n- Documentation updates\\n\\nFixes #123\\n```\\n\\nThank you for contributing to the LLM Load Testing Tool!"}', - name: "create_textdoc", - }, - type: "function", - index: 0, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01T48o7umn3FG5JVJWRgviUP", + ftm_content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/src/llm_load_test_runner.py\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "benchmark1_0701/src/llm_load_test_runner.py", + file_content: + 'import subprocess\nimport time\nimport csv\nimport json\nimport os\nimport sys\nfrom datetime import datetime\nfrom llm_test_logger import LLMTestLogger\n\nclass LLMLoadTest:\n def __init__(self, base_config, results_dir):\n self.base_config = base_config\n self.results_dir = results_dir\n self.results = []\n self.logger = LLMTestLogger(os.path.join(results_dir, "detailed_logs"))\n\n # Create the results directory if it doesn\'t exist\n os.makedirs(self.results_dir, exist_ok=True)\n\n def write_test_report(self, test_name, response_text, duration, exit_code, prompt_tokens, provider=None, model=None, generation_tokens=None, stream=None, temperature=None, logprobs=None, concurrency=None, time_to_first_token=None, latency_per_token=None, num_tokens=None, total_latency=None, num_requests=None, qps=None):\n timestamp = datetime.now().isoformat()\n report_data = {\n "Response received": response_text,\n "test_name": test_name,\n "duration": duration,\n "exit_code": exit_code,\n "Prompt Tokens": prompt_tokens,\n "Provider": provider,\n "Model": model,\n "Generation Tokens": generation_tokens,\n "Stream": stream,\n "Temperature": temperature,\n "Logprobs": logprobs,\n "Concurrency": concurrency,\n "Time To First Token": time_to_first_token,\n "Latency Per Token": latency_per_token,\n "Num Tokens": num_tokens,\n "Total Latency": total_latency,\n "Num Requests": num_requests,\n "Qps": qps,\n "_timestamp": timestamp\n }\n\n # Write JSON report\n json_report_path = os.path.join(self.results_dir, "load_test_report_" + timestamp.replace(":", "") + ".json")\n with open(json_report_path, "w") as f:\n json.dump([report_data], f, indent=2)\n\n # Write CSV report\n csv_report_path = os.path.join(self.results_dir, "load_test_report_" + timestamp.replace(":", "") + ".csv")\n with open(csv_report_path, "w", newline="") as f:\n writer = csv.writer(f)\n writer.writerow(["Response received", "Provider", "Model", "Prompt Tokens", "Generation Tokens", \n "Stream", "Temperature", "Logprobs", "Concurrency", "Time To First Token",\n "Latency Per Token", "Num Tokens", "Total Latency", "Num Requests", "Qps",\n "test_name", "duration", "exit_code"])\n writer.writerow([response_text, provider, model, prompt_tokens, generation_tokens,\n stream, temperature, logprobs, concurrency, time_to_first_token,\n latency_per_token, num_tokens, total_latency, 
num_requests, qps,\n test_name, duration, exit_code])\n\n def run_test(self, test_name, users, output_tokens):\n print(f"Running test: {test_name}")\n \n # Store max_tokens in base_config for later use in parse_output\n self.base_config[\'max-tokens\'] = output_tokens\n \n # Construct the command with additional parameters to ensure exact token count and proper test duration\n command = (f"locust -f {os.path.join(os.path.dirname(__file__), \'load_test.py\')} --headless "\n f"--host {self.base_config[\'host\']} "\n f"--provider {self.base_config[\'provider\']} "\n f"--model {self.base_config[\'model\']} "\n f"--api-key {self.base_config[\'api-key\']} "\n f"--logprobs {self.base_config[\'logprobs\']} "\n f"--run-time {self.base_config.get(\'run-time\', \'1m\')} "\n f"--users {users} "\n f"--spawn-rate {users} "\n f"--prompt-tokens {self.base_config.get(\'prompt-tokens\', 4046)} "\n f"--max-tokens {output_tokens} "\n f"--temperature {self.base_config.get(\'temperature\', 1.0)} "\n f"--expect-workers 1 " # Ensure proper worker initialization\n f"--stop-timeout 60 " # Increased timeout to match run-time\n f"--summary-file {self.results_dir}/results_{test_name}.csv "\n f"--no-stream " # Changed from --stream false to --no-stream\n f"--exit-code-on-error 1") # Exit with error code on failure\n print(f"Command: {command}")\n \n # Run the command and capture output\n start_time = time.time()\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n \n stdout_data = []\n stderr_data = []\n \n # Process output in real-time and ensure minimum runtime\n while True:\n # Read from stdout and stderr\n stdout_line = process.stdout.readline()\n stderr_line = process.stderr.readline()\n \n if stdout_line:\n print(stdout_line.strip())\n stdout_data.append(stdout_line)\n if stderr_line:\n print(stderr_line.strip())\n stderr_data.append(stderr_line)\n \n # Check if process has finished\n if process.poll() is not None:\n # Read any remaining output\n remaining_stdout, remaining_stderr = process.communicate()\n if remaining_stdout:\n stdout_data.append(remaining_stdout)\n if remaining_stderr:\n stderr_data.append(remaining_stderr)\n break\n \n # Check elapsed time\n elapsed_time = time.time() - start_time\n min_runtime = float(self.base_config.get(\'run-time\', \'30\').rstrip(\'s\'))\n \n if elapsed_time < min_runtime:\n time.sleep(0.1) # Small sleep to prevent CPU spinning\n continue\n \n duration = time.time() - start_time\n return_code = process.poll()\n \n # Ensure the test ran for the minimum duration\n if duration < float(self.base_config.get(\'run-time\', \'30\').rstrip(\'s\')):\n print(f"WARNING: Test duration {duration:.2f}s was shorter than requested {self.base_config.get(\'run-time\')}")\n return_code = 1\n \n # Parse metrics from output\n output = \'\'.join(stdout_data)\n metrics = self.parse_output(output)\n \n if metrics:\n metrics.update({\n \'test_name\': test_name,\n \'duration\': duration,\n \'exit_code\': return_code,\n \'Prompt Tokens\': self.base_config.get(\'prompt-tokens\', 4046),\n \'Concurrency\': users\n })\n self.results.append(metrics)\n \n # Write individual test report\n self.write_test_report(\n test_name=test_name,\n response_text=metrics.get(\'Response received\', \'\'),\n duration=duration,\n exit_code=return_code,\n prompt_tokens=metrics.get(\'Prompt Tokens\'),\n provider=metrics.get(\'Provider\'),\n model=metrics.get(\'Model\'),\n generation_tokens=metrics.get(\'Generation Tokens\'),\n stream=metrics.get(\'Stream\'),\n 
temperature=metrics.get(\'Temperature\'),\n logprobs=metrics.get(\'Logprobs\'),\n concurrency=metrics.get(\'Concurrency\'),\n time_to_first_token=metrics.get(\'Time To First Token\'),\n latency_per_token=metrics.get(\'Latency Per Token\'),\n num_tokens=metrics.get(\'Num Tokens\'),\n total_latency=metrics.get(\'Total Latency\'),\n num_requests=metrics.get(\'Num Requests\'),\n qps=metrics.get(\'Qps\')\n )\n\n def _parse_response(response_json):\n # First try usage.completion_tokens\n if \'usage\' in response_json and \'completion_tokens\' in response_json[\'usage\']:\n tokens = response_json[\'usage\'][\'completion_tokens\']\n # Then try generated_tokens_n\n elif \'generated_tokens_n\' in response_json:\n tokens = response_json[\'generated_tokens_n\']\n else:\n tokens = 0 # fallback if no token count available\n \n # Extract text from choices\n text = ""\n if \'choices\' in response_json and len(response_json[\'choices\']) > 0:\n if \'text\' in response_json[\'choices\'][0]:\n text = response_json[\'choices\'][0][\'text\']\n \n return {\n \'tokens\': tokens,\n \'text\': text,\n \'chars\': len(text) if text else 0\n }\n\n def process_completion_response(response, start_time):\n try:\n response_json = response.json()\n parsed = _parse_response(response_json)\n \n end_time = time.time()\n total_time = (end_time - start_time) * 1000 # Convert to milliseconds\n \n return {\n \'total_latency\': total_time,\n \'first_token_latency\': total_time, # Since we\'re not streaming, they\'re the same\n \'num_tokens\': parsed[\'tokens\'],\n \'text\': parsed[\'text\'],\n \'chars\': parsed[\'chars\']\n }\n \n except Exception as e:\n print(f"Error processing response: {e}")\n return None\n\n def parse_output(self, output):\n metrics = {}\n response_line = None\n \n for line in output.split(\'\\n\'):\n # Capture the response metrics line\n if line.startswith("Response received:"):\n response_line = line.strip()\n metrics[\'Response received\'] = response_line\n \n # Parse the response metrics\n if "total" in line and "first token" in line:\n try:\n # Extract total time\n total_time = float(line.split("total")[1].split("ms")[0].strip())\n metrics[\'Total Latency\'] = total_time\n \n # Extract first token time\n first_token = float(line.split("first token")[1].split("ms")[0].strip())\n metrics[\'Time To First Token\'] = first_token\n \n # Extract number of tokens\n tokens = int(line.split("tokens")[0].split(",")[-1].strip())\n metrics[\'Num Tokens\'] = tokens\n \n # Calculate latency per token\n if tokens > 0:\n latency_per_token = (total_time - first_token) / tokens\n metrics[\'Latency Per Token\'] = latency_per_token\n except (ValueError, IndexError) as e:\n print(f"Warning: Failed to parse metrics from line: {line}")\n print(f"Error: {str(e)}")\n \n # Parse other metrics from the stats table\n elif "POST" in line and "/v1/completions" in line:\n parts = [p.strip() for p in line.split("|") if p.strip()]\n if len(parts) >= 4:\n try:\n metrics[\'Num Requests\'] = int(parts[1].split()[0])\n qps = float(parts[-1].split()[0])\n metrics[\'Qps\'] = qps\n except (ValueError, IndexError) as e:\n print(f"Warning: Failed to parse POST metrics: {line}")\n print(f"Error: {str(e)}")\n \n # Parse provider and model info\n elif "Provider" in line and "using model" in line:\n try:\n parts = line.split("Provider")[1].split("using model")\n metrics[\'Provider\'] = parts[0].strip().strip("*")\n metrics[\'Model\'] = parts[1].strip().strip("*")\n except IndexError as e:\n print(f"Warning: Failed to parse provider/model info: 
{line}")\n print(f"Error: {str(e)}")\n \n # Add configuration metrics\n metrics[\'Stream\'] = False # Changed from hardcoded \'True\' to match actual config\n metrics[\'Temperature\'] = 1.0\n metrics[\'Logprobs\'] = 5\n metrics[\'Generation Tokens\'] = metrics.get(\'Num Tokens\', 50) # Default to max tokens if not found\n \n return metrics\n\n def generate_report(self):\n if not self.results:\n print("Warning: No results to generate report from")\n return\n\n timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")\n csv_filename = os.path.join(self.results_dir, f\'load_test_report_{timestamp}.csv\')\n json_filename = os.path.join(self.results_dir, f\'load_test_report_{timestamp}.json\')\n\n fieldnames = [\n \'Response received\', \'Provider\', \'Model\', \'Prompt Tokens\', \'Generation Tokens\',\n \'Stream\', \'Temperature\', \'Logprobs\', \'Concurrency\', \'Time To First Token\',\n \'Latency Per Token\', \'Num Tokens\', \'Total Latency\', \'Num Requests\', \'Qps\',\n \'test_name\', \'duration\', \'exit_code\'\n ]\n\n # Ensure all numeric fields are properly formatted\n for result in self.results:\n for field in [\'Time To First Token\', \'Latency Per Token\', \'Num Tokens\', \n \'Total Latency\', \'Num Requests\', \'Qps\']:\n if field in result and result[field] is not None:\n if isinstance(result[field], (int, float)):\n result[field] = f"{result[field]:.2f}" if isinstance(result[field], float) else str(result[field])\n\n with open(csv_filename, \'w\', newline=\'\') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for result in self.results:\n row = {k: (result.get(k, \'N/A\') if result.get(k) is not None else \'N/A\') \n for k in fieldnames}\n writer.writerow(row)\n\n with open(json_filename, \'w\') as f:\n json.dump(self.results, f, indent=2)\n\n print(f"Reports generated: {csv_filename} and {json_filename}")\n\ndef main():\n base_config = {\n "host": "https://dogfood.pilot.smallcloud.ai/",\n "provider": "openai",\n "model": "qwen2.5/coder/1.5b/base/vllm", # Updated model name to match the command\n "api-key": "d2aed710-a47b-4a3f-84f4-b02628c45e49",\n "logprobs": 5, # Set logprobs to an integer value\n "run-time": "60s", # Reduced runtime for faster testing\n "temperature": 1.0, # Add temperature parameter\n "top_p": 1.0, # Add top_p parameter to ensure no early stopping\n "frequency_penalty": 0.0, # Add frequency penalty\n "presence_penalty": 0.0, # Add presence penalty\n }\n\n # Create a results directory with timestamp\n timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")\n results_dir = os.path.join("results", f"run_{timestamp}")\n\n load_test = LLMLoadTest(base_config, results_dir)\n\n concurrent_users = [1]#, 2, 5, 10, 20, 50] # Test configurations\n max_tokens = [50]#1, 15, 30, 50]\n wait_time = 5 # Increased wait time between runs to ensure cleanup\n\n try:\n for u in concurrent_users:\n for o in max_tokens:\n test_name = f"test_u{u}_o{o}"\n print(f"\\n=== Starting test {test_name} ===")\n load_test.run_test(test_name, u, o)\n print(f"=== Test {test_name} completed ===")\n print(f"Waiting for {wait_time} seconds before the next run...")\n time.sleep(wait_time)\n\n load_test.generate_report()\n print("\\nAll tests completed successfully!")\n \n except KeyboardInterrupt:\n print("\\nTests interrupted by user")\n load_test.generate_report() # Generate report for completed tests\n except Exception as e:\n print(f"\\nError during test execution: {str(e)}")\n load_test.generate_report() # Generate report for completed tests\n\nif __name__ == 
"__main__":\n main()\n', + line1: 1, + line2: 356, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "Let me also check the load_test.py file to understand how the actual load testing is performed:", + ftm_tool_calls: [ + { + id: "toolu_01WXJiwA9oLdRRAS4SADKvmM", + function: { + arguments: + '{"paths": "/home/fupfv/git/benchmark1_0701/src/load_test.py", "skeleton": false}', + name: "cat", }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: "/home/fupfv/git/benchmark1_0701/docs/DEVELOPER_GUIDE.md", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - '# LLM Load Testing Tool Developer Guide\n\nThis guide is intended for developers who want to extend, modify, or contribute to the LLM Load Testing Tool. It provides technical details about the codebase, architecture, and guidelines for development.\n\n## Table of Contents\n\n1. [Architecture Overview](#architecture-overview)\n2. [Core Components](#core-components)\n3. [Adding New Features](#adding-new-features)\n - [Adding a New Provider](#adding-a-new-provider)\n - [Adding New Metrics](#adding-new-metrics)\n - [Adding New Visualizations](#adding-new-visualizations)\n4. [Testing Guidelines](#testing-guidelines)\n5. [Code Style and Conventions](#code-style-and-conventions)\n6. [Contributing Guidelines](#contributing-guidelines)\n\n## Architecture Overview\n\nThe LLM Load Testing Tool is built with a modular architecture that separates the concerns of test execution, data collection, and visualization. The main components are:\n\n1. **Test Runner**: Orchestrates the execution of load tests with different parameters.\n2. **Load Test Implementation**: Uses Locust to generate load and collect metrics.\n3. **Results Processing**: Parses and processes the raw test results.\n4. **Visualization Tools**: Generate charts and reports from the processed results.\n\nThe data flow through the system is as follows:\n\n```\nTest Configuration → Test Runner → Load Test Implementation → Raw Results → Results Processing → Visualizations\n```\n\n## Core Components\n\n### 1. llm_load_test_runner.py\n\nThis is the main entry point for running load tests. It:\n- Configures test parameters\n- Creates a results directory\n- Runs tests with different combinations of users and output tokens\n- Generates summary reports\n\nKey classes and methods:\n- `LLMLoadTest`: Main class for orchestrating tests\n - `run_test(test_name, users, output_tokens)`: Runs a single test\n - `write_test_report(...)`: Writes test results to files\n - `parse_output(output)`: Parses metrics from test output\n - `generate_report()`: Generates a summary report\n\n### 2. load_test.py\n\nThis file contains the Locust implementation for generating load. It:\n- Defines user behavior for load testing\n- Implements different provider classes for various LLM APIs\n- Collects and reports metrics\n\nKey classes:\n- `LLMUser`: Locust user class that sends requests to the LLM server\n- `BaseProvider`: Abstract base class for LLM providers\n - `OpenAIProvider`, `VllmProvider`, etc.: Provider-specific implementations\n- `LengthSampler`: Utility for sampling token lengths\n- `FixedQPSPacer`: Utility for controlling request rate\n\n### 3. llm_test_logger.py\n\nHandles logging of test results and details.\n\n### 4. visualize_results.py\n\nGenerates visualizations from test results. 
Key components:\n- `ResultsVisualizer`: Main class for generating visualizations\n - Various plotting methods for different metrics\n - `generate_all_visualizations()`: Generates all visualizations\n\n### 5. compare_runs.py\n\nCompares results from different test runs.\n\n### 6. dashboard_generator.py\n\nGenerates Grafana dashboard configurations.\n\n## Adding New Features\n\n### Adding a New Provider\n\nTo add support for a new LLM provider:\n\n1. Create a new provider class in `load_test.py` that inherits from `BaseProvider`:\n\n```python\nclass NewProvider(BaseProvider):\n DEFAULT_MODEL_NAME = "default-model-name" # Optional default model name\n \n def get_url(self):\n """Return the API endpoint URL."""\n return "/api/endpoint"\n \n def format_payload(self, prompt, max_tokens, images):\n """Format the request payload for this provider."""\n data = {\n "model": self.model,\n "prompt": prompt,\n "max_tokens": max_tokens,\n # Provider-specific parameters\n "provider_param": "value"\n }\n return data\n \n def parse_output_json(self, data, prompt):\n """Parse the response from this provider."""\n # Extract text, token counts, etc.\n text = data.get("output", "")\n tokens = data.get("token_count", 0)\n \n return ChunkMetadata(\n text=text,\n logprob_tokens=None,\n usage_tokens=tokens,\n prompt_usage_tokens=None\n )\n```\n\n2. Add the provider to the `PROVIDER_CLASS_MAP` dictionary:\n\n```python\nPROVIDER_CLASS_MAP = {\n # Existing providers\n "openai": OpenAIProvider,\n "vllm": VllmProvider,\n # Add your new provider\n "new_provider": NewProvider,\n}\n```\n\n### Adding New Metrics\n\nTo add a new metric to track:\n\n1. Modify the `LLMUser.generate_text` method in `load_test.py` to collect the new metric:\n\n```python\n@task\ndef generate_text(self):\n # Existing code...\n \n # Add your new metric calculation\n new_metric_value = calculate_new_metric(response, prompt)\n \n # Add the custom metric\n add_custom_metric("new_metric_name", new_metric_value)\n```\n\n2. Update the `write_test_report` method in `llm_load_test_runner.py` to include the new metric:\n\n```python\ndef write_test_report(self, test_name, response_text, duration, exit_code, prompt_tokens, provider=None, model=None, generation_tokens=None, stream=None, temperature=None, logprobs=None, concurrency=None, time_to_first_token=None, latency_per_token=None, num_tokens=None, total_latency=None, num_requests=None, qps=None, new_metric=None):\n # Add the new metric to report_data\n report_data = {\n # Existing fields...\n "New Metric": new_metric,\n }\n \n # Update CSV writer to include the new field\n writer.writerow(["Response received", ..., "New Metric", ...])\n writer.writerow([response_text, ..., new_metric, ...])\n```\n\n### Adding New Visualizations\n\nTo add a new visualization:\n\n1. Add a new plotting method to the `ResultsVisualizer` class in `visualize_results.py`:\n\n```python\ndef plot_new_visualization(self):\n """Plot a new visualization."""\n if \'required_column\' not in self.data.columns:\n print("Required column not found for new visualization")\n return\n \n plt.figure(figsize=(12, 8))\n \n # Your plotting code here\n sns.lineplot(x=\'Concurrency\', y=\'new_metric\', data=self.data)\n \n plt.title(\'New Metric Visualization\')\n plt.xlabel(\'Concurrent Users\')\n plt.ylabel(\'New Metric\')\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(self.output_dir / \'new_visualization.png\')\n plt.close()\n```\n\n2. 
Add your new method to the `generate_all_visualizations` method:\n\n```python\ndef generate_all_visualizations(self):\n # Existing visualizations\n self.plot_latency_by_concurrency()\n # ...\n \n # Add your new visualization\n self.plot_new_visualization()\n \n self.generate_summary_report()\n```\n\n## Testing Guidelines\n\nWhen developing new features or fixing bugs, follow these testing guidelines:\n\n1. **Unit Tests**: Write unit tests for new functionality, especially for data processing and parsing logic.\n\n2. **Integration Tests**: Test the integration between components, such as running a test and verifying that the results are correctly processed.\n\n3. **Manual Testing**: For visualizations and UI components, perform manual testing to ensure they render correctly.\n\n4. **Test with Different Providers**: Ensure that new features work with all supported providers.\n\n5. **Test with Different Parameters**: Verify that the tool works correctly with different combinations of users, tokens, and other parameters.\n\n6. **Error Handling**: Test error cases to ensure the tool gracefully handles failures.\n\n## Code Style and Conventions\n\nFollow these style guidelines when contributing to the codebase:\n\n1. **PEP 8**: Follow the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code.\n\n2. **Docstrings**: Use docstrings to document classes, methods, and functions. Follow the [Google style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.\n\n3. **Type Hints**: Use type hints to indicate parameter and return types.\n\n4. **Naming Conventions**:\n - Classes: `CamelCase`\n - Functions and methods: `snake_case`\n - Variables: `snake_case`\n - Constants: `UPPER_SNAKE_CASE`\n\n5. **Comments**: Add comments to explain complex logic or non-obvious behavior.\n\n6. **Imports**: Organize imports in the following order:\n - Standard library imports\n - Third-party library imports\n - Local application imports\n\n## Contributing Guidelines\n\nTo contribute to the LLM Load Testing Tool:\n\n1. **Fork the Repository**: Create a fork of the repository on GitHub.\n\n2. **Create a Branch**: Create a branch for your feature or bug fix.\n\n3. **Make Changes**: Implement your changes following the code style guidelines.\n\n4. **Write Tests**: Add tests for your changes to ensure they work correctly.\n\n5. **Update Documentation**: Update the documentation to reflect your changes.\n\n6. **Submit a Pull Request**: Submit a pull request with your changes.\n\n7. **Code Review**: Address any feedback from the code review.\n\n8. **Merge**: Once approved, your changes will be merged into the main branch.\n\n### Commit Message Guidelines\n\nFollow these guidelines for commit messages:\n\n- Use the present tense ("Add feature" not "Added feature")\n- Use the imperative mood ("Move cursor to..." 
not "Moves cursor to...")\n- Limit the first line to 72 characters or less\n- Reference issues and pull requests liberally after the first line\n\nExample:\n```\nAdd support for XYZ provider\n\nThis commit adds support for the XYZ provider, including:\n- Implementation of XYZProvider class\n- Unit tests for XYZ provider\n- Documentation updates\n\nFixes #123\n```\n\nThank you for contributing to the LLM Load Testing Tool!\n', - file_name_rename: null, - application_details: "", + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01WXJiwA9oLdRRAS4SADKvmM", + ftm_content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/src/load_test.py\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "benchmark1_0701/src/load_test.py", + file_content: + 'import abc\nimport argparse\nimport csv\nfrom dataclasses import dataclass\nfrom functools import partial\nimport os\nimport random\nimport sys\nimport traceback\nfrom typing import Optional\nfrom locust import HttpUser, task, events, constant_pacing\nimport copy\nimport json\nimport time\nimport orjson\nimport threading\nimport logging\nimport requests\n\n# Set up logging\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\'%(asctime)s - %(levelname)s - %(message)s\',\n handlers=[\n logging.StreamHandler(),\n logging.FileHandler(\'load_test.log\')\n ]\n)\n\ndef test_single_request():\n url = "https://dogfood.pilot.smallcloud.ai/v1/completions"\n headers = {\n "Content-Type": "application/json",\n "Authorization": "Bearer d2aed710-a47b-4a3f-84f4-b02628c45e49"\n }\n data = {\n "model": "starcoder2/3b/vllm",\n "prompt": "print", # Shorter prompt\n "max_tokens": 15,\n "temperature": 1.0,\n "stream": False,\n "timeout": 10\n }\n \n logging.info("Sending request with data: %s", json.dumps(data, indent=2))\n try:\n response = requests.post(url, headers=headers, json=data, timeout=10)\n logging.info(f"Response status: {response.status_code}")\n logging.info(f"Response headers: {dict(response.headers)}")\n \n if response.status_code == 200:\n resp_json = response.json()\n logging.info("Raw response: %s", json.dumps(resp_json, indent=2))\n \n # Check token counts\n usage = resp_json.get("usage", {})\n generated_tokens = resp_json.get("generated_tokens_n")\n \n logging.info("\\nToken counts:")\n logging.info(f"usage.completion_tokens: {usage.get(\'completion_tokens\')}")\n logging.info(f"generated_tokens_n: {generated_tokens}")\n \n # Check text output\n choices = resp_json.get("choices", [])\n if choices:\n text = choices[0].get("text", "")\n logging.info(f"\\nGenerated text ({len(text)} chars):")\n logging.info(text)\n else:\n logging.error("Error response: %s", response.text)\n except requests.exceptions.Timeout:\n logging.error("Request timed out after 10 seconds")\n except Exception as e:\n logging.error("Error during request: %s", str(e))\n\nif __name__ == "__main__":\n test_single_request()\n\ntry:\n import locust_plugins\nexcept ImportError:\n print("locust-plugins is not installed, Grafana won\'t work")\n\n\ndef add_custom_metric(name, value, length_value=0):\n events.request.fire(\n request_type="METRIC",\n name=name,\n response_time=value,\n response_length=length_value,\n exception=None,\n context=None,\n )\n\n\nprompt_prefix = "Pad " # exactly one token\n# "Lengthy" prompt borrowed from nat.dev\nprompt = """Generate a Django application with Authentication, JWT, Tests, DB support. Show docker-compose for python and postgres. 
Show the complete code for every file!"""\nprompt_tokens = 35 # from Llama tokenizer tool (so we don\'t import it here)\nprompt_random_tokens = 10\n\n\nclass FixedQPSPacer:\n _instance = None\n _lock = threading.Lock()\n\n def __init__(self, qps, distribution):\n self.qps = qps\n self.distribution = distribution\n\n # It\'s kind of thread safe thanks to GIL as the only state is `t` - good enough for a loadtest\n def gen():\n t = time.time()\n mean_wait = 1 / self.qps\n while True:\n if self.distribution == "exponential":\n wait = random.expovariate(1 / mean_wait)\n elif self.distribution == "uniform":\n wait = random.uniform(0, 2 * mean_wait)\n elif self.distribution == "constant":\n wait = mean_wait\n else:\n print("Unknown distribution {self.distribution}")\n os._exit(1)\n t += wait\n yield t\n\n self.iterator = gen()\n\n @classmethod\n def instance(cls, qps, distribution):\n with cls._lock:\n if cls._instance is None:\n cls._instance = cls(qps, distribution)\n else:\n assert cls._instance.qps == qps\n assert cls._instance.distribution == distribution\n return cls._instance\n\n def wait_time_till_next(self):\n with self._lock:\n t = next(self.iterator)\n now = time.time()\n if now > t:\n print(\n f"WARNING: not enough locust users to keep up with the desired QPS. Either the number of locust users is too low or the server is overloaded. Delay: {now-t:.3f}s"\n )\n return 0\n return t - now\n\n\nclass LengthSampler:\n def __init__(self, distribution: str, mean: int, cap: Optional[int], alpha: float):\n self.distribution = distribution\n self.mean = mean\n self.cap = cap\n self.alpha = alpha\n\n if self.distribution == "exponential":\n self.sample_func = lambda: int(random.expovariate(1 / self.mean))\n elif self.distribution == "uniform":\n mx = self.mean + int(self.alpha * self.mean)\n if self.cap is not None:\n mx = min(mx, self.cap)\n self.sample_func = lambda: random.randint(\n max(1, self.mean - int(self.alpha * self.mean)), mx\n )\n elif self.distribution == "constant":\n self.sample_func = lambda: self.mean\n elif self.distribution == "normal":\n self.sample_func = lambda: int(\n random.gauss(self.mean, self.mean * self.alpha)\n )\n else:\n raise ValueError(f"Unknown distribution {self.distribution}")\n\n def sample(self) -> int:\n for _ in range(1000):\n sample = self.sample_func()\n if sample <= 0:\n continue\n if self.cap is not None and sample > self.cap:\n continue\n return sample\n else:\n raise ValueError(\n "Can\'t sample a value after 1000 attempts, check distribution parameters"\n )\n\n def __str__(self):\n r = int(self.mean * self.alpha)\n if self.distribution == "constant":\n s = str(self.mean)\n elif self.distribution == "uniform":\n s = f"uniform({self.mean} +/- {r})"\n elif self.distribution == "normal":\n s = f"normal({self.mean}, {r})"\n elif self.distribution == "exponential":\n s = f"exponential({self.mean})"\n else:\n assert False\n if self.cap is not None:\n s += f" capped at {self.cap}"\n return s\n\n\nclass InitTracker:\n lock = threading.Lock()\n users = None\n first_request_done = 0\n logging_params = None\n environment = None\n tokenizer = None\n\n @classmethod\n def notify_init(cls, environment, logging_params):\n with cls.lock:\n if cls.environment is None:\n cls.environment = environment\n if cls.logging_params is None:\n cls.logging_params = logging_params\n else:\n assert (\n cls.logging_params == logging_params\n ), f"Inconsistent settings between workers: {cls.logging_params} != {logging_params}"\n\n @classmethod\n def notify_first_request(cls):\n with 
cls.lock:\n if (\n cls.environment.parsed_options.qps is not None\n and cls.first_request_done == 0\n ):\n # if in QPS mode, reset after first successful request comes back\n cls.reset_stats()\n cls.first_request_done += 1\n if (\n cls.environment.parsed_options.qps is not None\n and cls.first_request_done == 0\n and cls.users == cls.first_request_done\n ):\n # if in fixed load mode, reset after all users issued one request (we\'re in a steady state)\n cls.reset_stats()\n\n @classmethod\n def notify_spawning_complete(cls, user_count):\n with cls.lock:\n cls.users = user_count\n if cls.users == cls.first_request_done:\n cls.reset_stats()\n\n @classmethod\n def reset_stats(cls):\n assert cls.environment.runner, "only local mode is supported"\n print("Resetting stats after traffic reach a steady state")\n cls.environment.events.reset_stats.fire()\n cls.environment.runner.stats.reset_all()\n\n @classmethod\n def load_tokenizer(cls, dir):\n if not dir:\n return None\n with cls.lock:\n if cls.tokenizer:\n return cls.tokenizer\n import transformers\n\n cls.tokenizer = transformers.AutoTokenizer.from_pretrained(dir)\n cls.tokenizer.add_bos_token = False\n cls.tokenizer.add_eos_token = False\n return cls.tokenizer\n\n\nevents.spawning_complete.add_listener(InitTracker.notify_spawning_complete)\n\n\n@dataclass\nclass ChunkMetadata:\n text: str\n logprob_tokens: Optional[int]\n usage_tokens: Optional[int]\n prompt_usage_tokens: Optional[int]\n max_tokens: Optional[int] = None\n should_retry: bool = False\n\n\nclass BaseProvider(abc.ABC):\n DEFAULT_MODEL_NAME = None\n\n def __init__(self, model, parsed_options):\n self.model = model\n self.parsed_options = parsed_options\n\n @abc.abstractmethod\n def get_url(self): ...\n\n @abc.abstractmethod\n def format_payload(self, prompt, max_tokens, images): ...\n\n @abc.abstractmethod\n def parse_output_json(self, json, prompt): ...\n\n\nclass OpenAIProvider(BaseProvider):\n def get_url(self):\n if self.parsed_options.chat:\n return "v1/chat/completions"\n else:\n #return ""\n return "v1/completions"\n\n def format_payload(self, prompt, max_tokens, images):\n data = {\n "model": self.model,\n "max_tokens": max_tokens,\n "stream": self.parsed_options.stream,\n "temperature": self.parsed_options.temperature,\n # Add strict token control\n "min_tokens": max_tokens, # Force minimum tokens\n "ignore_eos": True, # Don\'t stop on EOS token\n "stop": None, # Disable stop sequences\n "best_of": 1, # Disable multiple sequences\n "use_beam_search": False, # Disable beam search\n "top_p": 1.0, # Disable nucleus sampling\n "top_k": 0, # Disable top-k sampling\n "presence_penalty": 0.0, # No presence penalty\n "frequency_penalty": 0.0, # No frequency penalty\n }\n if self.parsed_options.chat:\n if images is None:\n data["messages"] = [{"role": "user", "content": prompt}]\n else:\n image_urls = []\n for image in images:\n image_urls.append(\n {"type": "image_url", "image_url": {"url": image}}\n )\n data["messages"] = [\n {\n "role": "user",\n "content": [{"type": "text", "text": prompt}, *image_urls],\n }\n ]\n else:\n data["prompt"] = prompt\n if images is not None:\n data["images"] = images\n if self.parsed_options.logprobs is not None:\n data["logprobs"] = self.parsed_options.logprobs\n return data\n\n def parse_output_json(self, data, prompt):\n # Check for error response\n if data.get("status") == "error":\n error_msg = data.get(\'human_readable_message\', \'unknown error\')\n print(f"API Error: {error_msg}")\n \n # For timeout errors, return a special metadata\n if 
error_msg == "timeout":\n return ChunkMetadata(\n text="[TIMEOUT]",\n logprob_tokens=None,\n usage_tokens=self.parsed_options.max_tokens, # Use requested token count\n prompt_usage_tokens=None,\n max_tokens=self.parsed_options.max_tokens\n )\n \n # For other errors\n return ChunkMetadata(\n text="[ERROR]",\n logprob_tokens=None,\n usage_tokens=0,\n prompt_usage_tokens=None,\n max_tokens=None\n )\n \n usage = data.get("usage", None)\n generated_tokens = data.get("generated_tokens_n", None)\n\n # Handle empty choices array\n choices = data.get("choices", [])\n if not choices:\n # Return empty text with usage info if available\n return ChunkMetadata(\n text="",\n logprob_tokens=None,\n usage_tokens=generated_tokens if generated_tokens is not None else (usage["completion_tokens"] if usage else self.parsed_options.max_tokens),\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=data.get("max_tokens", self.parsed_options.max_tokens)\n )\n\n choice = choices[0]\n if self.parsed_options.chat:\n if self.parsed_options.stream:\n text = choice["delta"].get("content", "")\n else:\n text = choice["message"]["content"]\n else:\n text = choice.get("text", "")\n\n logprobs = choice.get("logprobs", None)\n tokens = generated_tokens if generated_tokens is not None else (\n usage["completion_tokens"] if usage else self.parsed_options.max_tokens\n )\n\n # Validate token count matches request\n if tokens != self.parsed_options.max_tokens:\n print(f"WARNING: Generated tokens {tokens} != requested {self.parsed_options.max_tokens}")\n\n return ChunkMetadata(\n text=text,\n logprob_tokens=len(logprobs["tokens"]) if logprobs else None,\n usage_tokens=tokens,\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=data.get("max_tokens", self.parsed_options.max_tokens)\n )\n\n\nclass FireworksProvider(OpenAIProvider):\n def format_payload(self, prompt, max_tokens, images):\n data = super().format_payload(prompt, max_tokens, images)\n data["min_tokens"] = max_tokens\n data["prompt_cache_max_len"] = 0\n return data\n\n\nclass VllmProvider(OpenAIProvider):\n def format_payload(self, prompt, max_tokens, images):\n data = {\n "model": self.model,\n "prompt": prompt,\n "max_tokens": max_tokens,\n "stream": self.parsed_options.stream,\n "temperature": self.parsed_options.temperature,\n # VLLM specific parameters for exact token generation\n "ignore_eos": True,\n "min_tokens": max_tokens,\n "stop": [], # Empty list instead of None\n "best_of": 1,\n "use_beam_search": False,\n "top_p": 1.0,\n "top_k": -1, # -1 instead of 0 for VLLM\n "presence_penalty": 0.0,\n "frequency_penalty": 0.0\n }\n if self.parsed_options.logprobs is not None:\n data["logprobs"] = self.parsed_options.logprobs\n if images is not None:\n data["images"] = images\n return data\n\n def parse_output_json(self, data, prompt):\n # Handle error responses\n if data.get("status") == "error":\n error_msg = data.get(\'human_readable_message\', \'unknown error\')\n print(f"API Error: {error_msg}")\n return ChunkMetadata(\n text="[ERROR]",\n logprob_tokens=None,\n usage_tokens=0,\n prompt_usage_tokens=None,\n max_tokens=None,\n should_retry=False\n )\n \n usage = data.get("usage", None)\n generated_tokens = data.get("generated_tokens_n", None)\n choices = data.get("choices", [])\n \n if not choices:\n return ChunkMetadata(\n text="",\n logprob_tokens=None,\n usage_tokens=generated_tokens if generated_tokens is not None else (usage["completion_tokens"] if usage else self.parsed_options.max_tokens),\n 
prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=self.parsed_options.max_tokens,\n should_retry=False\n )\n\n choice = choices[0]\n text = choice.get("text", "")\n logprobs = choice.get("logprobs", None)\n tokens = generated_tokens if generated_tokens is not None else (\n usage["completion_tokens"] if usage else self.parsed_options.max_tokens\n )\n\n # Log token generation details\n print(f"Generated tokens: {tokens}, Requested: {self.parsed_options.max_tokens}")\n \n return ChunkMetadata(\n text=text,\n logprob_tokens=len(logprobs["tokens"]) if logprobs else None,\n usage_tokens=tokens,\n prompt_usage_tokens=usage.get("prompt_tokens", None) if usage else None,\n max_tokens=self.parsed_options.max_tokens,\n should_retry=False\n )\n # Force exact token generation\n data.update({\n "ignore_eos": True,\n "max_tokens": max_tokens,\n "min_tokens": max_tokens,\n "stop": None,\n "best_of": 1,\n "use_beam_search": False,\n "top_p": 1.0, # Disable nucleus sampling\n "top_k": 0, # Disable top-k sampling\n "presence_penalty": 0.0,\n "frequency_penalty": 0.0,\n "temperature": 1.0, # Use standard temperature\n "early_stopping": False\n })\n return data\n\n\nclass TogetherProvider(OpenAIProvider):\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n return "/"\n\n def format_payload(self, prompt, max_tokens, images):\n data = super().format_payload(prompt, max_tokens, images)\n data["ignore_eos"] = True\n data["stream_tokens"] = data.pop("stream")\n return data\n\n def parse_output_json(self, data, prompt):\n if not self.parsed_options.stream:\n data = data["output"]\n return super().parse_output_json(data, prompt)\n\n\nclass TritonInferProvider(BaseProvider):\n DEFAULT_MODEL_NAME = "ensemble"\n\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n assert not self.parsed_options.stream, "Stream is not supported"\n return f"/v2/models/{self.model}/infer"\n\n def format_payload(self, prompt, max_tokens, images):\n assert images is None, "images are not supported"\n # matching latest TRT-LLM example, your model configuration might be different\n data = {\n "inputs": [\n {\n "name": "text_input",\n "datatype": "BYTES",\n "shape": [1, 1],\n "data": [[prompt]],\n },\n {\n "name": "max_tokens",\n "datatype": "UINT32",\n "shape": [1, 1],\n "data": [[max_tokens]],\n },\n {\n "name": "bad_words",\n "datatype": "BYTES",\n "shape": [1, 1],\n "data": [[""]],\n },\n {\n "name": "stop_words",\n "datatype": "BYTES",\n "shape": [1, 1],\n "data": [[""]],\n },\n {\n "name": "temperature",\n "datatype": "FP32",\n "shape": [1, 1],\n "data": [[self.parsed_options.temperature]],\n },\n ]\n }\n assert self.parsed_options.logprobs is None, "logprobs are not supported"\n return data\n\n def parse_output_json(self, data, prompt):\n for output in data["outputs"]:\n if output["name"] == "text_output":\n assert output["datatype"] == "BYTES"\n assert output["shape"] == [1]\n text = output["data"][0]\n # Triton returns the original prompt in the output, cut it off\n text = text.removeprefix(" ")\n if text.startswith(prompt):\n # HF tokenizers get confused by the leading space\n text = text[len(prompt) :].removeprefix(" ")\n else:\n print("WARNING: prompt not found in the output")\n return ChunkMetadata(\n text=text,\n logprob_tokens=None,\n usage_tokens=None,\n prompt_usage_tokens=None,\n )\n raise ValueError("text_output not found in the response")\n\n\nclass TritonGenerateProvider(BaseProvider):\n DEFAULT_MODEL_NAME = "ensemble"\n\n 
def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n stream_suffix = "_stream" if self.parsed_options.stream else ""\n return f"/v2/models/{self.model}/generate{stream_suffix}"\n\n def format_payload(self, prompt, max_tokens, images):\n assert images is None, "images are not supported"\n data = {\n "text_input": prompt,\n "max_tokens": max_tokens,\n "stream": self.parsed_options.stream,\n "temperature": self.parsed_options.temperature,\n # for whatever reason these has to be provided\n "bad_words": "",\n "stop_words": "",\n }\n assert self.parsed_options.logprobs is None, "logprobs are not supported"\n return data\n\n def parse_output_json(self, data, prompt):\n text = data["text_output"]\n if not self.parsed_options.stream:\n # Triton returns the original prompt in the output, cut it off\n text = text.removeprefix(" ")\n if text.startswith(prompt):\n # HF tokenizers get confused by the leading space\n text = text[len(prompt) :].removeprefix(" ")\n else:\n print("WARNING: prompt not found in the output")\n return ChunkMetadata(\n text=text,\n logprob_tokens=None,\n usage_tokens=None,\n prompt_usage_tokens=None,\n )\n\n\nclass TgiProvider(BaseProvider):\n DEFAULT_MODEL_NAME = ""\n\n def get_url(self):\n assert not self.parsed_options.chat, "Chat is not supported"\n stream_suffix = "_stream" if self.parsed_options.stream else ""\n return f"/generate{stream_suffix}"\n\n def format_payload(self, prompt, max_tokens, images):\n assert images is None, "images are not supported"\n data = {\n "inputs": prompt,\n "parameters": {\n "max_new_tokens": max_tokens,\n "temperature": self.parsed_options.temperature,\n "top_n_tokens": self.parsed_options.logprobs,\n "details": self.parsed_options.logprobs is not None,\n },\n }\n return data\n\n def parse_output_json(self, data, prompt):\n if "token" in data:\n # streaming chunk\n return ChunkMetadata(\n text=data["token"]["text"],\n logprob_tokens=1,\n usage_tokens=None,\n prompt_usage_tokens=None,\n )\n else:\n # non-streaming response\n return ChunkMetadata(\n text=data["generated_text"],\n logprob_tokens=(\n len(data["details"]["tokens"]) if "details" in data else None\n ),\n usage_tokens=(\n data["details"]["generated_tokens"] if "details" in data else None\n ),\n prompt_usage_tokens=None,\n )\n\n\nPROVIDER_CLASS_MAP = {\n "fireworks": FireworksProvider,\n "vllm": VllmProvider,\n "openai": OpenAIProvider,\n "anyscale": OpenAIProvider,\n "together": TogetherProvider,\n "triton-infer": TritonInferProvider,\n "triton-generate": TritonGenerateProvider,\n "tgi": TgiProvider,\n}\n\n\ndef _load_curl_like_data(text):\n """\n Either use the passed string or load from a file if the string is `@filename`\n """\n if text.startswith("@"):\n try:\n if text.endswith(".jsonl"):\n with open(text[1:], "r") as f:\n return [json.loads(line) for line in f]\n else:\n with open(text[1:], "r") as f:\n return f.read()\n except Exception as e:\n raise ValueError(f"Failed to read file {text[1:]}") from e\n else:\n return text\n\n\nclass LLMUser(HttpUser):\n # no wait time, so every user creates a continuous load, sending requests as quickly as possible\n\n def on_start(self):\n try:\n self._on_start()\n except Exception as e:\n print(f"Failed to initialize: {repr(e)}")\n print(traceback.format_exc())\n sys.exit(1)\n\n def _guess_provider(self):\n self.model = self.environment.parsed_options.model\n self.provider = self.environment.parsed_options.provider\n # guess based on URL\n if self.provider is None:\n if "fireworks.ai" in self.host:\n 
self.provider = "fireworks"\n elif "together" in self.host:\n self.provider = "together"\n elif "openai" in self.host:\n self.provider = "openai"\n elif "anyscale" in self.host:\n self.provider = "anyscale"\n\n if (\n self.model is None\n and self.provider is not None\n and PROVIDER_CLASS_MAP[self.provider].DEFAULT_MODEL_NAME is not None\n ):\n self.model = PROVIDER_CLASS_MAP[self.provider].DEFAULT_MODEL_NAME\n\n if self.model and self.provider:\n return\n\n # vllm doesn\'t support /model/ endpoint, so iterate over all models\n try:\n resp = self.client.get("/v1/models")\n resp.raise_for_status()\n resp = resp.json()\n except Exception as e:\n raise ValueError(\n "Argument --model or --provider was not specified and /v1/models failed"\n ) from e\n\n models = resp["data"]\n assert len(models) > 0, "No models found in /v1/models"\n owned_by = None\n # pick the first model\n for m in models:\n if self.model is None or m["id"] == self.model:\n self.model = m["id"]\n owned_by = m["owned_by"]\n break\n if self.provider is None:\n if not owned_by:\n raise ValueError(\n f"Model {self.model} not found in /v1/models. Specify --provider explicitly"\n )\n if owned_by in ["vllm", "fireworks"]:\n self.provider = owned_by\n else:\n raise ValueError(\n f"Can\'t detect provider, specify it explicitly with --provider, owned_by={owned_by}"\n )\n\n def _on_start(self):\n self.client.headers["Content-Type"] = "application/json"\n if self.environment.parsed_options.api_key:\n self.client.headers["Authorization"] = (\n "Bearer " + self.environment.parsed_options.api_key\n )\n self._guess_provider()\n print(f" Provider {self.provider} using model {self.model} ".center(80, "*"))\n self.provider_formatter = PROVIDER_CLASS_MAP[self.provider](\n self.model, self.environment.parsed_options\n )\n\n self.stream = self.environment.parsed_options.stream\n prompt_chars = self.environment.parsed_options.prompt_chars\n if self.environment.parsed_options.prompt_text:\n self.input = _load_curl_like_data(\n self.environment.parsed_options.prompt_text\n )\n elif prompt_chars:\n self.input = (\n prompt_prefix * (prompt_chars // len(prompt_prefix) + 1) + prompt\n )[:prompt_chars]\n else:\n min_prompt_len = (\n prompt_tokens\n + prompt_random_tokens\n * self.environment.parsed_options.prompt_randomize\n )\n assert (\n self.environment.parsed_options.prompt_tokens >= min_prompt_len\n ), f"Minimal prompt length is {min_prompt_len}"\n self.input = (\n prompt_prefix\n * (self.environment.parsed_options.prompt_tokens - min_prompt_len)\n + prompt\n )\n self.max_tokens_sampler = LengthSampler(\n distribution=self.environment.parsed_options.max_tokens_distribution,\n mean=self.environment.parsed_options.max_tokens,\n cap=self.environment.parsed_options.max_tokens_cap,\n alpha=self.environment.parsed_options.max_tokens_range,\n )\n self.temperature = self.environment.parsed_options.temperature\n\n logging_params = {\n # TODO: add some server info with git version\n "provider": self.provider,\n "model": self.model,\n "prompt_tokens": self.environment.parsed_options.prompt_tokens, # might be overwritten based on metric\n "generation_tokens": str(self.max_tokens_sampler),\n "stream": self.stream,\n "temperature": self.temperature,\n "logprobs": self.environment.parsed_options.logprobs,\n }\n InitTracker.notify_init(self.environment, logging_params)\n\n self.tokenizer = InitTracker.load_tokenizer(\n self.environment.parsed_options.tokenizer\n )\n if self.tokenizer:\n self.prompt_tokenizer_tokens = len(\n 
self.tokenizer.encode(self._get_input()[0])\n )\n else:\n self.prompt_tokenizer_tokens = None\n\n if self.environment.parsed_options.qps is not None:\n if self.environment.parsed_options.burst:\n raise ValueError("Burst and QPS modes are mutually exclusive")\n pacer = FixedQPSPacer.instance(\n self.environment.parsed_options.qps,\n self.environment.parsed_options.qps_distribution,\n )\n # it will be called by Locust after each task\n self.wait_time = pacer.wait_time_till_next\n self.wait()\n elif self.environment.parsed_options.burst:\n self.wait_time = partial(\n constant_pacing(self.environment.parsed_options.burst), self\n )\n else:\n # introduce initial delay to avoid all users hitting the service at the same time\n time.sleep(random.random())\n\n self.first_done = False\n\n def _get_input(self):\n def _maybe_randomize(prompt):\n if not self.environment.parsed_options.prompt_randomize:\n return prompt\n # single letters are single tokens\n return (\n " ".join(\n chr(ord("a") + random.randint(0, 25))\n for _ in range(prompt_random_tokens)\n )\n + " "\n + prompt\n )\n\n if isinstance(self.input, str):\n return _maybe_randomize(self.input), None\n else:\n item = self.input[random.randint(0, len(self.input) - 1)]\n assert "prompt" in item\n return _maybe_randomize(item["prompt"]), item.get("images", None)\n\n @task\n def generate_text(self):\n max_tokens = self.max_tokens_sampler.sample()\n prompt, images = self._get_input()\n data = self.provider_formatter.format_payload(prompt, max_tokens, images)\n t_start = time.perf_counter()\n\n logging.debug("Sending request with data: %s", json.dumps(data, indent=2))\n \n with self.client.post(\n self.provider_formatter.get_url(),\n data=json.dumps(data),\n stream=True,\n catch_response=True,\n ) as response:\n logging.debug("Got response status: %d", response.status_code)\n logging.debug("Response headers: %s", dict(response.headers))\n \n dur_chunks = []\n combined_text = ""\n done = False\n prompt_usage_tokens = self.prompt_tokenizer_tokens\n total_usage_tokens = None\n total_logprob_tokens = None\n try:\n response.raise_for_status()\n except Exception as e:\n logging.error("Response error text: %s", response.text)\n raise RuntimeError(f"Error in response: {response.text}") from e\n t_first_token = None\n for chunk in response.iter_lines(delimiter=b"\\n\\n"):\n if t_first_token is None:\n t_first_token = time.perf_counter()\n t_prev = time.perf_counter()\n\n if len(chunk) == 0:\n continue # come providers send empty lines between data chunks\n if done:\n if chunk != b"data: [DONE]":\n print(f"WARNING: Received more chunks after [DONE]: {chunk}")\n try:\n now = time.perf_counter()\n dur_chunks.append(now - t_prev)\n t_prev = now\n if self.stream:\n assert chunk.startswith(\n b"data:"\n ), f"Unexpected chunk not starting with \'data\': {chunk}"\n chunk = chunk[len(b"data:") :]\n if chunk.strip() == b"[DONE]":\n done = True\n continue\n logging.debug("Processing chunk: %s", chunk.decode())\n data = orjson.loads(chunk)\n logging.debug("Parsed chunk data: %s", json.dumps(data, indent=2))\n out = self.provider_formatter.parse_output_json(data, prompt)\n if out.usage_tokens:\n total_usage_tokens = (\n total_usage_tokens or 0\n ) + out.usage_tokens\n logging.debug("Updated total_usage_tokens: %d", total_usage_tokens)\n if out.prompt_usage_tokens:\n prompt_usage_tokens = out.prompt_usage_tokens\n logging.debug("Updated prompt_usage_tokens: %d", prompt_usage_tokens)\n combined_text += out.text\n\n if out.logprob_tokens:\n total_logprob_tokens = (\n 
total_logprob_tokens or 0\n ) + out.logprob_tokens\n logging.debug("Updated total_logprob_tokens: %d", total_logprob_tokens)\n except Exception as e:\n logging.error("Failed to parse response: %s with error %s", chunk, repr(e))\n response.failure(e)\n return\n assert t_first_token is not None, "empty response received"\n if (\n (total_logprob_tokens is not None)\n and (total_usage_tokens is not None)\n and total_logprob_tokens != total_usage_tokens\n ):\n print(\n f"WARNING: usage_tokens {total_usage_tokens} != logprob_tokens {total_logprob_tokens}"\n )\n if total_logprob_tokens is not None:\n num_tokens = total_logprob_tokens\n else:\n num_tokens = total_usage_tokens\n if self.tokenizer:\n num_tokenizer_tokens = len(self.tokenizer.encode(combined_text))\n if num_tokens is None:\n num_tokens = num_tokenizer_tokens\n elif num_tokens != num_tokenizer_tokens:\n print(\n f"WARNING: tokenizer token count {num_tokenizer_tokens} != {num_tokens} received from server"\n )\n num_tokens = num_tokens or 0\n num_chars = len(combined_text)\n now = time.perf_counter()\n dur_total = now - t_start\n dur_generation = now - t_first_token\n dur_first_token = t_first_token - t_start\n print(\n f"Response received: total {dur_total*1000:.2f} ms, first token {dur_first_token*1000:.2f} ms, {num_chars} chars, {num_tokens} tokens"\n )\n if self.environment.parsed_options.show_response:\n print("---")\n print(combined_text)\n print("---")\n if num_chars:\n add_custom_metric(\n "latency_per_char", dur_generation / num_chars * 1000, num_chars\n )\n if self.stream:\n add_custom_metric("time_to_first_token", dur_first_token * 1000)\n add_custom_metric("total_latency", dur_total * 1000)\n if num_tokens:\n if num_tokens != max_tokens:\n print(\n f"WARNING: wrong number of tokens: {num_tokens}, expected {max_tokens}"\n )\n add_custom_metric("num_tokens", num_tokens)\n add_custom_metric("max_tokens", max_tokens) # Add max_tokens metric\n add_custom_metric(\n "latency_per_token", dur_generation / num_tokens * 1000, num_tokens\n )\n add_custom_metric(\n "overall_latency_per_token",\n dur_total / num_tokens * 1000,\n num_tokens,\n )\n if (\n prompt_usage_tokens is not None\n and self.prompt_tokenizer_tokens is not None\n and prompt_usage_tokens != self.prompt_tokenizer_tokens\n ):\n print(\n f"WARNING: prompt usage tokens {prompt_usage_tokens} != {self.prompt_tokenizer_tokens} derived from local tokenizer"\n )\n prompt_tokens = prompt_usage_tokens or self.prompt_tokenizer_tokens\n if prompt_tokens:\n add_custom_metric("prompt_tokens", prompt_tokens)\n\n if not self.first_done:\n self.first_done = True\n InitTracker.notify_first_request()\n\n\n@events.init_command_line_parser.add_listener\ndef init_parser(parser):\n parser.add_argument(\n "--provider",\n choices=list(PROVIDER_CLASS_MAP.keys()),\n type=str,\n help="Which flavor of API to use. If not specified, we\'ll try to guess based on the URL and /v1/models output",\n )\n parser.add_argument(\n "-m",\n "--model",\n env_var="MODEL",\n type=str,\n help="The model to use for generating text. If not specified we will pick the first model from the service as returned by /v1/models",\n )\n parser.add_argument(\n "--chat",\n action=argparse.BooleanOptionalAction,\n default=False,\n help="Use /v1/chat/completions API",\n )\n parser.add_argument(\n "-p",\n "--prompt-tokens",\n env_var="PROMPT_TOKENS",\n type=int,\n default=512,\n help="Length of the prompt in tokens. 
Default 512",\n )\n parser.add_argument(\n "--prompt-chars",\n env_var="PROMPT_CHARS",\n type=int,\n help="Length of the prompt in characters.",\n )\n parser.add_argument(\n "--prompt-text",\n env_var="PROMPT_TEXT",\n type=str,\n help="Prompt text to use instead of generating one. It can be a file reference starting with an ampersand, e.g. `@prompt.txt`",\n )\n parser.add_argument(\n "--prompt-randomize",\n action=argparse.BooleanOptionalAction,\n default=False,\n help="Include a few random numbers in the generated prompt to avoid caching",\n )\n parser.add_argument(\n "-o",\n "--max-tokens",\n env_var="MAX_TOKENS",\n type=int,\n default=64,\n help="Max number of tokens to generate. If --max-tokens-distribution is non-constant this is going to be the mean. Defaults to 64",\n )\n parser.add_argument(\n "--max-tokens-cap",\n env_var="MAX_TOKENS_CAP",\n type=int,\n help="If --max-tokens-distribution is non-constant, this truncates the distribition at the specified limit",\n )\n parser.add_argument(\n "--max-tokens-distribution",\n env_var="MAX_TOKENS_DISTRIBUTION",\n type=str,\n choices=["constant", "uniform", "exponential", "normal"],\n default="constant",\n help="How to sample `max-tokens` on each request",\n )\n parser.add_argument(\n "--max-tokens-range",\n env_var="MAX_TOKENS_RANGE",\n type=float,\n default=0.3,\n help="Specifies the width of the distribution. Specified value `alpha` is relative to `max-tokens`. For uniform distribution we\'d sample from [max_tokens - max_tokens * alpha, max_tokens + max_tokens * alpha]. For normal distribution we\'d sample from `N(max_tokens, max_tokens * alpha)`. Defaults to 0.3",\n )\n parser.add_argument(\n "--stream",\n dest="stream",\n action=argparse.BooleanOptionalAction,\n default=True,\n help="Use the streaming API",\n )\n parser.add_argument(\n "-k",\n "--api-key",\n env_var="API_KEY",\n help="Auth for the API",\n )\n parser.add_argument(\n "--temperature",\n env_var="TEMPERATURE",\n type=float,\n default=1.0,\n help="Temperature parameter for the API",\n )\n parser.add_argument(\n "--logprobs",\n type=int,\n default=None,\n help="Whether to ask for logprobs, it makes things slower for some providers but is necessary for token count in streaming (unless it\'s Fireworks API that returns usage in streaming mode)",\n )\n parser.add_argument(\n "--summary-file",\n type=str,\n help="Append the line with the summary to the specified CSV file. Useful for generating a spreadsheet with perf sweep results. If the file doesn\'t exist, writes out the header first",\n )\n parser.add_argument(\n "--qps",\n type=float,\n default=None,\n help="Enabled \'fixed QPS\' mode where requests are issues at the specified rate regardless of how long the processing takes. In this case --users and --spawn-rate need to be set to a sufficiently high value (e.g. 100)",\n )\n parser.add_argument(\n "--qps-distribution",\n type=str,\n choices=["constant", "uniform", "exponential"],\n default="constant",\n help="Must be used with --qps. Specifies how to space out requests: equally (\'constant\') or by sampling wait times from a distribution (\'uniform\' or \'exponential\'). Expected QPS is going to match --qps",\n )\n parser.add_argument(\n "--burst",\n type=float,\n default=None,\n help="Makes requests to arrive in bursts every specified number of seconds. Note that burst duration has to be longer than maximum time of the response. Size of the burst is controlled by --users. 
The spawn rate -r is best set to a high value",\n )\n parser.add_argument(\n "--tokenizer",\n type=str,\n help="Specify HF tokenizer to use for validating the output of the model. It\'s optional, we\'re going to rely on \'usage\' or \'logprobs\' field to get token count information",\n )\n parser.add_argument(\n "--show-response",\n action=argparse.BooleanOptionalAction,\n default=False,\n help="Print the result of each generation",\n )\n\n\n@events.quitting.add_listener\ndef _(environment, **kw):\n total_latency = environment.stats.entries[("total_latency", "METRIC")]\n if environment.stats.total.num_failures > 0 or total_latency.num_requests == 0:\n print("Test failed due to failed requests")\n environment.process_exit_code = 1\n return\n\n entries = copy.copy(InitTracker.logging_params)\n if environment.parsed_options.qps is not None:\n entries["concurrency"] = (\n f"QPS {environment.parsed_options.qps} {environment.parsed_options.qps_distribution}"\n )\n else:\n entries["concurrency"] = InitTracker.users\n for metric_name in [\n "time_to_first_token",\n "latency_per_token",\n "num_tokens",\n "total_latency",\n "prompt_tokens", # might overwrite the static value based on server side tokenization\n ]:\n entries[metric_name] = environment.stats.entries[\n (metric_name, "METRIC")\n ].avg_response_time\n if not environment.parsed_options.stream:\n # if there\'s no streaming these metrics are meaningless\n entries["time_to_first_token"] = ""\n entries["latency_per_token"] = ""\n entries["num_requests"] = total_latency.num_requests\n entries["qps"] = total_latency.total_rps\n\n pretty_name = lambda s: " ".join([w.capitalize() for w in s.split("_")])\n entries = {pretty_name(k): v for k, v in entries.items()}\n\n # print in the final event handler to make sure our output is the last one\n @events.quit.add_listener\n def exit_printer(**kw):\n max_width = max(len(k) for k in entries.keys())\n print(" Summary ".center(80, "="))\n for k, v in entries.items():\n print(f"{k:<{max_width}}: {v}")\n print("=" * 80)\n\n if environment.parsed_options.summary_file:\n with open(environment.parsed_options.summary_file, "a") as f:\n writer = csv.DictWriter(f, fieldnames=entries.keys())\n if f.tell() == 0:\n writer.writeheader()\n writer.writerow(entries)\n', + line1: 1, + line2: 1225, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "Let me also check the visualization script to understand how the results are visualized:", + ftm_tool_calls: [ + { + id: "toolu_01VxiuWGqPfbMSgprqkhe7uU", + function: { + arguments: + '{"paths": "/home/fupfv/git/benchmark1_0701/src/visualize_results.py", "skeleton": false}', + name: "cat", }, - ], - tool_call_id: "toolu_018ik5qU9MGsUpZY4Bn5f33o", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\n\n\nFinally, let's create a quick start guide for users who want to get started quickly:", - ftm_tool_calls: [ - { - id: "toolu_01EpCs5Tzt8XiiQnKzmo7TnN", - function: { - arguments: - '{"path": "/home/fupfv/git/benchmark1_0701/docs/QUICK_START.md", "content": "# LLM Load Testing Tool - Quick Start Guide\\n\\nThis guide will help you quickly set up and run your first load test with the LLM Load Testing Tool.\\n\\n## Prerequisites\\n\\n- Python 3.8 or higher\\n- pip (Python package manager)\\n- Access to an LLM inference server (or API key for a hosted service)\\n\\n## Installation\\n\\n1. Clone the repository (or download and extract the ZIP file):\\n\\n```bash\\ngit clone \\ncd llm-load-test\\n```\\n\\n2. 
Install the required dependencies:\\n\\n```bash\\npip install -r requirements.txt\\n```\\n\\n3. Make the scripts executable (optional, for Linux/macOS):\\n\\n```bash\\nbash make_scripts_executable.sh\\n```\\n\\n## Running Your First Test\\n\\n### Step 1: Configure the Test\\n\\nOpen `src/llm_load_test_runner.py` and modify the `base_config` dictionary with your LLM server details:\\n\\n```python\\nbase_config = {\\n \\"host\\": \\"https://your-llm-server.com/\\", # URL of your LLM server\\n \\"provider\\": \\"openai\\", # Provider type (openai, vllm, etc.)\\n \\"model\\": \\"your-model-name\\", # Model name\\n \\"api-key\\": \\"your-api-key\\", # API key (if required)\\n \\"logprobs\\": 5, # Log probabilities (optional)\\n \\"run-time\\": \\"60s\\", # Duration of each test\\n \\"temperature\\": 1.0, # Temperature for generation\\n}\\n```\\n\\n### Step 2: Run the Test\\n\\nRun the load test with the default parameters:\\n\\n```bash\\npython src/llm_load_test_runner.py\\n```\\n\\nThis will run tests with:\\n- Users: 1, 2, 50 (concurrency levels)\\n- Output tokens: 15, 30\\n- Run time: 60s per test\\n- Prompt tokens: 4046\\n- Temperature: 1.0\\n\\n### Step 3: View the Results\\n\\nAfter the tests complete, results will be saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\\n\\n- Individual test results: `results_test_u{users}_o{tokens}.csv`\\n- Summary reports: \\n - `load_test_report_YYYYMMDD_HHMMSS.csv`\\n - `load_test_report_YYYYMMDD_HHMMSS.json`\\n\\n### Step 4: Generate Visualizations\\n\\nGenerate visualizations from the test results:\\n\\n```bash\\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\\n```\\n\\nVisualizations will be saved in the `results/run_YYYYMMDD_HHMMSS/visualizations/` directory.\\n\\n## Customizing Your Tests\\n\\n### Changing Test Parameters\\n\\nTo customize the test parameters, modify these variables in `llm_load_test_runner.py`:\\n\\n```python\\nconcurrent_users = [1, 2, 5, 10, 20, 50] # Concurrency levels to test\\nmax_tokens = [1, 15, 30, 50] # Output token counts to test\\nbase_config[\\"run-time\\"] = \\"60s\\" # Duration of each test\\n```\\n\\n### Using a Different Prompt\\n\\nThe default prompt is defined in `load_test.py`. To use a different prompt, modify the `prompt` variable:\\n\\n```python\\nprompt = \\"\\"\\"Your custom prompt text here\\"\\"\\"\\n```\\n\\n### Testing with Different Providers\\n\\nThe tool supports various LLM providers. 
To use a different provider, set the `provider` field in `base_config`:\\n\\n```python\\nbase_config = {\\n \\"provider\\": \\"vllm\\", # Change to your desired provider\\n # Other configuration...\\n}\\n```\\n\\nSupported providers include:\\n- `openai`: OpenAI-compatible APIs\\n- `vllm`: vLLM server\\n- `fireworks`: Fireworks AI\\n- `together`: Together AI\\n- `triton-infer`: Triton Inference Server\\n- `triton-generate`: Triton Generate API\\n- `tgi`: Text Generation Inference\\n\\n## Common Issues and Solutions\\n\\n### Connection Errors\\n\\nIf you encounter connection errors:\\n- Check that your LLM server is running and accessible\\n- Verify that your API key is correct\\n- Check for any network restrictions\\n\\n### Timeout Errors\\n\\nIf requests are timing out:\\n- Increase the timeout settings\\n- Reduce the concurrency level\\n- Check server resource utilization\\n\\n### Missing Data in Visualizations\\n\\nIf visualizations are missing data:\\n- Check that the CSV files contain the expected columns\\n- Ensure the test completed successfully\\n- Try running with fewer concurrent users or shorter output lengths\\n\\n## Next Steps\\n\\nFor more detailed information:\\n\\n- [Full Documentation](docs/README.md): Comprehensive documentation of all features\\n- [Visualization Guide](docs/VISUALIZATION_GUIDE.md): Detailed guide to the visualization tools\\n- [Developer Guide](docs/DEVELOPER_GUIDE.md): Guide for developers who want to extend the tool\\n\\n## Getting Help\\n\\nIf you encounter any issues or have questions:\\n\\n- Check the [Troubleshooting](docs/README.md#troubleshooting) section in the documentation\\n- Open an issue on the GitHub repository\\n- Contact the maintainers"}', - name: "create_textdoc", - }, - type: "function", - index: 0, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01VxiuWGqPfbMSgprqkhe7uU", + ftm_content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/src/visualize_results.py\n", + }, + { + ftm_role: "context_file", + ftm_content: JSON.stringify([ + { + file_name: "src/visualize_results.py", + file_content: + "#!/usr/bin/env python3\n\"\"\"\nVisualization tool for LLM load test results.\nThis script generates various charts and visualizations from the benchmark results.\n\"\"\"\n\nimport os\nimport json\nimport glob\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nfrom datetime import datetime\n\nclass ResultsVisualizer:\n def __init__(self, results_dir, output_dir=None):\n \"\"\"\n Initialize the visualizer with the directory containing test results.\n \n Args:\n results_dir: Directory containing test result files\n output_dir: Directory to save visualizations (defaults to results_dir/visualizations)\n \"\"\"\n self.results_dir = Path(results_dir)\n if output_dir:\n self.output_dir = Path(output_dir)\n else:\n self.output_dir = self.results_dir / \"visualizations\"\n \n # Create output directory if it doesn't exist\n os.makedirs(self.output_dir, exist_ok=True)\n \n # Set style for plots\n sns.set_style(\"whitegrid\")\n plt.rcParams.update({\n 'figure.figsize': (12, 8),\n 'font.size': 12,\n 'axes.titlesize': 16,\n 'axes.labelsize': 14\n })\n \n # Load data\n self.data = self._load_data()\n \n def _load_data(self):\n \"\"\"Load and combine all CSV result files into a single DataFrame.\"\"\"\n all_files = glob.glob(str(self.results_dir / \"**\" / \"*.csv\"), recursive=True)\n \n # Filter out 
files that don't match the expected pattern\n result_files = [f for f in all_files if \"results_test\" in f or \"load_test_report\" in f]\n \n if not result_files:\n raise ValueError(f\"No result files found in {self.results_dir}\")\n \n print(f\"Found {len(result_files)} result files\")\n \n # Load all files into a list of dataframes\n dfs = []\n for file in result_files:\n try:\n df = pd.read_csv(file)\n # Add source file information\n df['source_file'] = os.path.basename(file)\n df['run_dir'] = os.path.basename(os.path.dirname(file))\n dfs.append(df)\n except Exception as e:\n print(f\"Error loading {file}: {e}\")\n \n if not dfs:\n raise ValueError(\"No valid data files could be loaded\")\n \n # Combine all dataframes\n combined_df = pd.concat(dfs, ignore_index=True)\n \n # Convert numeric columns\n numeric_cols = ['Time To First Token', 'Latency Per Token', 'Total Latency', \n 'Num Tokens', 'Num Requests', 'Qps', 'Prompt Tokens', \n 'Generation Tokens', 'Concurrency']\n \n for col in numeric_cols:\n if col in combined_df.columns:\n combined_df[col] = pd.to_numeric(combined_df[col], errors='coerce')\n \n # Extract user count and output token count from test_name\n if 'test_name' in combined_df.columns:\n combined_df['users'] = combined_df['test_name'].str.extract(r'test_u(\\d+)_o\\d+').astype(float)\n combined_df['output_tokens'] = combined_df['test_name'].str.extract(r'test_u\\d+_o(\\d+)').astype(float)\n \n return combined_df\n \n def plot_latency_by_concurrency(self):\n \"\"\"Plot latency metrics by concurrency level.\"\"\"\n if 'Concurrency' not in self.data.columns or 'Total Latency' not in self.data.columns:\n print(\"Required columns not found for latency by concurrency plot\")\n return\n \n plt.figure(figsize=(14, 8))\n \n # Group by concurrency and calculate mean latency\n grouped = self.data.groupby('Concurrency')[['Total Latency', 'Time To First Token', 'Latency Per Token']].mean().reset_index()\n \n # Plot\n plt.plot(grouped['Concurrency'], grouped['Total Latency'], 'o-', linewidth=2, label='Total Latency')\n plt.plot(grouped['Concurrency'], grouped['Time To First Token'], 's-', linewidth=2, label='Time To First Token')\n \n # Add second y-axis for latency per token\n ax2 = plt.gca().twinx()\n ax2.plot(grouped['Concurrency'], grouped['Latency Per Token'], '^-', color='green', linewidth=2, label='Latency Per Token')\n ax2.set_ylabel('Latency Per Token (ms)', color='green')\n ax2.tick_params(axis='y', colors='green')\n \n plt.title('Latency Metrics by Concurrency Level')\n plt.xlabel('Concurrent Users')\n plt.ylabel('Latency (ms)')\n plt.grid(True)\n \n # Combine legends from both axes\n lines1, labels1 = plt.gca().get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n plt.legend(lines1 + lines2, labels1 + labels2, loc='upper left')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'latency_by_concurrency.png')\n plt.close()\n \n def plot_throughput_by_concurrency(self):\n \"\"\"Plot throughput (QPS) by concurrency level.\"\"\"\n if 'Concurrency' not in self.data.columns or 'Qps' not in self.data.columns:\n print(\"Required columns not found for throughput plot\")\n return\n \n plt.figure(figsize=(12, 8))\n \n # Group by concurrency and calculate mean QPS\n grouped = self.data.groupby('Concurrency')['Qps'].mean().reset_index()\n \n # Plot\n sns.barplot(x='Concurrency', y='Qps', data=grouped)\n \n plt.title('Throughput (QPS) by Concurrency Level')\n plt.xlabel('Concurrent Users')\n plt.ylabel('Queries Per Second')\n plt.grid(True, axis='y')\n 
\n # Add value labels on top of bars\n for i, v in enumerate(grouped['Qps']):\n plt.text(i, v + 0.1, f\"{v:.2f}\", ha='center')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'throughput_by_concurrency.png')\n plt.close()\n \n def plot_latency_by_output_tokens(self):\n \"\"\"Plot latency metrics by output token count.\"\"\"\n if 'output_tokens' not in self.data.columns or 'Total Latency' not in self.data.columns:\n print(\"Required columns not found for latency by output tokens plot\")\n return\n \n plt.figure(figsize=(14, 8))\n \n # Group by output tokens and calculate mean latency\n grouped = self.data.groupby('output_tokens')[['Total Latency', 'Time To First Token', 'Latency Per Token']].mean().reset_index()\n \n # Plot\n plt.plot(grouped['output_tokens'], grouped['Total Latency'], 'o-', linewidth=2, label='Total Latency')\n plt.plot(grouped['output_tokens'], grouped['Time To First Token'], 's-', linewidth=2, label='Time To First Token')\n \n # Add second y-axis for latency per token\n ax2 = plt.gca().twinx()\n ax2.plot(grouped['output_tokens'], grouped['Latency Per Token'], '^-', color='green', linewidth=2, label='Latency Per Token')\n ax2.set_ylabel('Latency Per Token (ms)', color='green')\n ax2.tick_params(axis='y', colors='green')\n \n plt.title('Latency Metrics by Output Token Count')\n plt.xlabel('Output Tokens')\n plt.ylabel('Latency (ms)')\n plt.grid(True)\n \n # Combine legends from both axes\n lines1, labels1 = plt.gca().get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n plt.legend(lines1 + lines2, labels1 + labels2, loc='upper left')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'latency_by_output_tokens.png')\n plt.close()\n \n def plot_heatmap_latency(self):\n \"\"\"Create a heatmap of latency by concurrency and output tokens.\"\"\"\n if 'users' not in self.data.columns or 'output_tokens' not in self.data.columns:\n print(\"Required columns not found for heatmap plot\")\n return\n \n # Group by users and output_tokens and calculate mean latency\n pivot = self.data.pivot_table(\n index='users', \n columns='output_tokens', \n values='Total Latency',\n aggfunc='mean'\n )\n \n plt.figure(figsize=(12, 10))\n sns.heatmap(pivot, annot=True, fmt=\".1f\", cmap=\"YlGnBu\", linewidths=.5)\n \n plt.title('Total Latency (ms) by Concurrency and Output Tokens')\n plt.xlabel('Output Tokens')\n plt.ylabel('Concurrent Users')\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'heatmap_latency.png')\n plt.close()\n \n def plot_model_comparison(self):\n \"\"\"Compare performance across different models if available.\"\"\"\n if 'Model' not in self.data.columns:\n print(\"Model column not found for model comparison plot\")\n return\n \n # Check if we have multiple models\n models = self.data['Model'].unique()\n if len(models) <= 1:\n print(\"Only one model found, skipping model comparison\")\n return\n \n plt.figure(figsize=(14, 10))\n \n # Create subplot for latency comparison\n plt.subplot(2, 1, 1)\n sns.boxplot(x='Model', y='Total Latency', data=self.data)\n plt.title('Latency Comparison Across Models')\n plt.ylabel('Total Latency (ms)')\n plt.xticks(rotation=45)\n \n # Create subplot for throughput comparison\n plt.subplot(2, 1, 2)\n sns.boxplot(x='Model', y='Qps', data=self.data)\n plt.title('Throughput Comparison Across Models')\n plt.ylabel('Queries Per Second')\n plt.xticks(rotation=45)\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'model_comparison.png')\n plt.close()\n \n def plot_run_comparison(self):\n \"\"\"Compare 
performance across different test runs.\"\"\"\n if 'run_dir' not in self.data.columns:\n print(\"Run directory column not found for run comparison plot\")\n return\n \n # Check if we have multiple runs\n runs = self.data['run_dir'].unique()\n if len(runs) <= 1:\n print(\"Only one run found, skipping run comparison\")\n return\n \n plt.figure(figsize=(16, 12))\n \n # Create subplot for latency comparison\n plt.subplot(2, 1, 1)\n sns.boxplot(x='run_dir', y='Total Latency', data=self.data)\n plt.title('Latency Comparison Across Test Runs')\n plt.ylabel('Total Latency (ms)')\n plt.xticks(rotation=90)\n \n # Create subplot for throughput comparison\n plt.subplot(2, 1, 2)\n sns.boxplot(x='run_dir', y='Qps', data=self.data)\n plt.title('Throughput Comparison Across Test Runs')\n plt.ylabel('Queries Per Second')\n plt.xticks(rotation=90)\n \n plt.tight_layout()\n plt.savefig(self.output_dir / 'run_comparison.png')\n plt.close()\n \n def plot_latency_distribution(self):\n \"\"\"Plot the distribution of latency values.\"\"\"\n if 'Total Latency' not in self.data.columns:\n print(\"Total Latency column not found for latency distribution plot\")\n return\n \n plt.figure(figsize=(12, 8))\n \n # Create histogram with KDE\n sns.histplot(self.data['Total Latency'].dropna(), kde=True, bins=30)\n \n plt.title('Distribution of Total Latency')\n plt.xlabel('Total Latency (ms)')\n plt.ylabel('Frequency')\n \n # Add vertical line for mean and median\n mean_latency = self.data['Total Latency'].mean()\n median_latency = self.data['Total Latency'].median()\n \n plt.axvline(mean_latency, color='r', linestyle='--', label=f'Mean: {mean_latency:.2f} ms')\n plt.axvline(median_latency, color='g', linestyle='-.', label=f'Median: {median_latency:.2f} ms')\n \n plt.legend()\n plt.tight_layout()\n plt.savefig(self.output_dir / 'latency_distribution.png')\n plt.close()\n \n def plot_token_generation_speed(self):\n \"\"\"Plot token generation speed (tokens per second) by concurrency.\"\"\"\n if 'Num Tokens' not in self.data.columns or 'Total Latency' not in self.data.columns:\n print(\"Required columns not found for token generation speed plot\")\n return\n \n # Calculate tokens per second\n self.data['tokens_per_second'] = self.data['Num Tokens'] / (self.data['Total Latency'] / 1000)\n \n plt.figure(figsize=(12, 8))\n \n # Group by concurrency and calculate mean tokens per second\n if 'Concurrency' in self.data.columns:\n grouped = self.data.groupby('Concurrency')['tokens_per_second'].mean().reset_index()\n \n # Plot\n sns.barplot(x='Concurrency', y='tokens_per_second', data=grouped)\n \n plt.title('Token Generation Speed by Concurrency Level')\n plt.xlabel('Concurrent Users')\n plt.ylabel('Tokens Per Second')\n \n # Add value labels on top of bars\n for i, v in enumerate(grouped['tokens_per_second']):\n plt.text(i, v + 0.1, f\"{v:.2f}\", ha='center')\n else:\n # If no concurrency data, just plot overall distribution\n sns.histplot(self.data['tokens_per_second'].dropna(), kde=True, bins=30)\n plt.title('Distribution of Token Generation Speed')\n plt.xlabel('Tokens Per Second')\n plt.ylabel('Frequency')\n \n plt.grid(True, axis='y')\n plt.tight_layout()\n plt.savefig(self.output_dir / 'token_generation_speed.png')\n plt.close()\n \n def generate_summary_report(self):\n \"\"\"Generate a text summary report with key statistics.\"\"\"\n if self.data.empty:\n print(\"No data available for summary report\")\n return\n \n # Calculate summary statistics\n summary = {\n 'total_tests': len(self.data),\n 'unique_models': 
self.data['Model'].nunique() if 'Model' in self.data.columns else 0,\n 'unique_runs': self.data['run_dir'].nunique() if 'run_dir' in self.data.columns else 0,\n 'avg_latency': self.data['Total Latency'].mean() if 'Total Latency' in self.data.columns else None,\n 'median_latency': self.data['Total Latency'].median() if 'Total Latency' in self.data.columns else None,\n 'avg_qps': self.data['Qps'].mean() if 'Qps' in self.data.columns else None,\n 'max_concurrency': self.data['Concurrency'].max() if 'Concurrency' in self.data.columns else None,\n 'timestamp': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n \n # Create summary report\n report = f\"\"\"\n # LLM Load Test Summary Report\n \n Generated: {summary['timestamp']}\n \n ## Overview\n - Total tests analyzed: {summary['total_tests']}\n - Unique models tested: {summary['unique_models']}\n - Unique test runs: {summary['unique_runs']}\n \n ## Performance Metrics\n - Average latency: {summary['avg_latency']:.2f} ms\n - Median latency: {summary['median_latency']:.2f} ms\n - Average throughput: {summary['avg_qps']:.2f} QPS\n - Maximum concurrency tested: {summary['max_concurrency']}\n \n ## Visualizations\n The following visualization files have been generated in {self.output_dir}:\n - latency_by_concurrency.png\n - throughput_by_concurrency.png\n - latency_by_output_tokens.png\n - heatmap_latency.png\n - latency_distribution.png\n - token_generation_speed.png\n \"\"\"\n \n if summary['unique_models'] > 1:\n report += \"- model_comparison.png\\n\"\n \n if summary['unique_runs'] > 1:\n report += \"- run_comparison.png\\n\"\n \n # Write report to file\n with open(self.output_dir / 'summary_report.md', 'w') as f:\n f.write(report)\n \n print(f\"Summary report generated at {self.output_dir / 'summary_report.md'}\")\n \n def generate_all_visualizations(self):\n \"\"\"Generate all available visualizations.\"\"\"\n print(\"Generating visualizations...\")\n \n self.plot_latency_by_concurrency()\n self.plot_throughput_by_concurrency()\n self.plot_latency_by_output_tokens()\n self.plot_heatmap_latency()\n self.plot_model_comparison()\n self.plot_run_comparison()\n self.plot_latency_distribution()\n self.plot_token_generation_speed()\n self.generate_summary_report()\n \n print(f\"All visualizations generated in {self.output_dir}\")\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Generate visualizations for LLM load test results')\n parser.add_argument('--results-dir', type=str, default='results',\n help='Directory containing test results (default: results)')\n parser.add_argument('--output-dir', type=str, default=None,\n help='Directory to save visualizations (default: results_dir/visualizations)')\n parser.add_argument('--run', type=str, default=None,\n help='Specific run directory to analyze (default: analyze all runs)')\n \n args = parser.parse_args()\n \n # Determine the results directory\n base_dir = Path(args.results_dir)\n \n if args.run:\n # If a specific run is specified, use that directory\n results_dir = base_dir / args.run\n if not results_dir.exists():\n print(f\"Run directory {results_dir} not found\")\n return\n else:\n # Otherwise use the base results directory\n results_dir = base_dir\n \n try:\n visualizer = ResultsVisualizer(results_dir, args.output_dir)\n visualizer.generate_all_visualizations()\n except Exception as e:\n print(f\"Error generating visualizations: {e}\")\n\n\nif __name__ == \"__main__\":\n main()\n", + line1: 1, + line2: 449, + }, + ]), + }, + { + ftm_role: "assistant", + ftm_content: + "Based on 
my analysis of the project, I'll create comprehensive documentation for the LLM Load Testing Tool. Let me create a documentation structure that covers all aspects of the project.", + ftm_tool_calls: [ + { + id: "toolu_011nt32gEpSJZet4jGzrVXBt", + function: { + arguments: + '{"path": "/home/fupfv/git/benchmark1_0701/docs/README.md", "content": "# LLM Load Testing Tool Documentation\\n\\nWelcome to the comprehensive documentation for the LLM Load Testing Tool. This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\\n\\n## Table of Contents\\n\\n1. [Introduction](#introduction)\\n2. [Getting Started](#getting-started)\\n - [Installation](#installation)\\n - [Quick Start](#quick-start)\\n3. [Core Components](#core-components)\\n4. [Running Tests](#running-tests)\\n - [Basic Usage](#basic-usage)\\n - [Configuration Options](#configuration-options)\\n - [Test Parameters](#test-parameters)\\n5. [Understanding Results](#understanding-results)\\n - [Report Files](#report-files)\\n - [Metrics Explained](#metrics-explained)\\n6. [Visualization Tools](#visualization-tools)\\n - [Single Run Visualization](#single-run-visualization)\\n - [Run Comparison](#run-comparison)\\n - [Grafana Dashboard](#grafana-dashboard)\\n7. [Advanced Usage](#advanced-usage)\\n - [Custom Prompts](#custom-prompts)\\n - [Provider-Specific Options](#provider-specific-options)\\n - [Scaling Tests](#scaling-tests)\\n8. [Troubleshooting](#troubleshooting)\\n9. [API Reference](#api-reference)\\n10. [Contributing](#contributing)\\n\\n## Introduction\\n\\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\\n\\nKey features include:\\n- Testing with different concurrency levels and output token counts\\n- Comprehensive metrics collection (latency, throughput, token generation speed)\\n- Detailed visualization of test results\\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\\n- Comparison of different test runs and models\\n\\n## Getting Started\\n\\n### Installation\\n\\n1. Clone the repository:\\n```bash\\ngit clone \\ncd llm-load-test\\n```\\n\\n2. Install dependencies:\\n```bash\\npip install -r requirements.txt\\n```\\n\\n3. Make scripts executable (optional):\\n```bash\\nbash make_scripts_executable.sh\\n```\\n\\n### Quick Start\\n\\nRun a basic load test with default parameters:\\n\\n```bash\\npython src/llm_load_test_runner.py\\n```\\n\\nThis will run tests with the default configuration:\\n- Users: 1, 2, 50\\n- Output tokens: 15, 30\\n- Run time: 60s per test\\n- Prompt tokens: 4046\\n- Temperature: 1.0\\n\\n## Core Components\\n\\nThe tool consists of several key components:\\n\\n1. **llm_load_test_runner.py**: The main script that orchestrates the load tests.\\n2. **load_test.py**: The Locust implementation that handles the actual request generation.\\n3. **llm_test_logger.py**: Handles logging of test results.\\n4. **visualize_results.py**: Generates visualizations from test results.\\n5. **compare_runs.py**: Compares results from different test runs.\\n6. 
**dashboard_generator.py**: Creates Grafana dashboard configurations.\\n\\n## Running Tests\\n\\n### Basic Usage\\n\\nThe simplest way to run a test is:\\n\\n```bash\\npython src/llm_load_test_runner.py\\n```\\n\\n### Configuration Options\\n\\nYou can customize the test by modifying the `base_config` dictionary in `llm_load_test_runner.py`:\\n\\n```python\\nbase_config = {\\n \\"host\\": \\"https://your-llm-server.com/\\",\\n \\"provider\\": \\"openai\\",\\n \\"model\\": \\"your-model-name\\",\\n \\"api-key\\": \\"your-api-key\\",\\n \\"logprobs\\": 5,\\n \\"run-time\\": \\"60s\\",\\n \\"temperature\\": 1.0,\\n}\\n```\\n\\n### Test Parameters\\n\\nThe main test parameters you can adjust include:\\n\\n- **concurrent_users**: List of user counts to test (e.g., `[1, 2, 5, 10, 20, 50]`)\\n- **max_tokens**: List of output token counts to test (e.g., `[1, 15, 30, 50]`)\\n- **run-time**: Duration of each test (e.g., `\\"60s\\"`)\\n- **prompt-tokens**: Number of tokens in the prompt (default: 4046)\\n- **temperature**: Temperature parameter for generation (default: 1.0)\\n\\nExample of customizing test parameters:\\n\\n```python\\nconcurrent_users = [1, 5, 10]\\nmax_tokens = [10, 50, 100]\\nbase_config[\\"run-time\\"] = \\"120s\\"\\n```\\n\\n## Understanding Results\\n\\n### Report Files\\n\\nAfter running tests, results are saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\\n\\n1. **Individual test results**: `results_test_u{users}_o{tokens}.csv`\\n2. **Summary reports**:\\n - `load_test_report_YYYYMMDD_HHMMSS.csv`\\n - `load_test_report_YYYYMMDD_HHMMSS.json`\\n\\n### Metrics Explained\\n\\nThe test collects and reports the following metrics:\\n\\n- **Total Latency**: Total time from request to complete response (ms)\\n- **Time To First Token**: Time until the first token is received (ms)\\n- **Latency Per Token**: Average time to generate each token (ms)\\n- **Num Tokens**: Number of tokens generated\\n- **Num Requests**: Total number of requests processed\\n- **Qps**: Queries per second (throughput)\\n- **Prompt Tokens**: Number of tokens in the prompt\\n- **Generation Tokens**: Number of tokens requested to generate\\n\\n## Visualization Tools\\n\\nThe project includes several visualization tools to help analyze test results.\\n\\n### Single Run Visualization\\n\\nGenerate visualizations for a single test run:\\n\\n```bash\\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\\n```\\n\\nThis generates the following visualizations:\\n- Latency by concurrency level\\n- Throughput (QPS) by concurrency level\\n- Latency by output token count\\n- Heatmap of latency by concurrency and output tokens\\n- Latency distribution\\n- Token generation speed\\n- Summary report\\n\\n### Run Comparison\\n\\nCompare results across multiple test runs:\\n\\n```bash\\npython src/compare_runs.py --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\\n```\\n\\nThis generates comparison visualizations showing:\\n- Latency metrics comparison across runs\\n- Throughput comparison across runs\\n- Performance by concurrency level across runs\\n- Performance by output token count across runs\\n\\n### Grafana Dashboard\\n\\nCreate a Grafana dashboard configuration for real-time monitoring:\\n\\n```bash\\npython src/dashboard_generator.py --output-file grafana-dashboard.json\\n```\\n\\nTo use the dashboard:\\n1. Open Grafana in your web browser\\n2. Navigate to Dashboards > Import\\n3. Upload the generated JSON file or paste its contents\\n4. Configure the data source if prompted\\n5. 
Click Import\\n\\n## Advanced Usage\\n\\n### Custom Prompts\\n\\nYou can customize the prompt used for testing by modifying the `prompt` variable in `load_test.py`:\\n\\n```python\\nprompt = \\"\\"\\"Your custom prompt text here\\"\\"\\"\\n```\\n\\nFor more complex scenarios, you can use a file with multiple prompts:\\n\\n```bash\\npython src/llm_load_test_runner.py --prompt-text @prompts.jsonl\\n```\\n\\n### Provider-Specific Options\\n\\nThe tool supports various LLM providers, each with specific configuration options:\\n\\n- **OpenAI/Compatible APIs**: Uses the standard OpenAI API format\\n- **vLLM**: Optimized for vLLM servers with specific parameters\\n- **Fireworks**: Includes Fireworks-specific optimizations\\n- **Triton**: Support for Triton Inference Server\\n\\nProvider-specific options can be set in the `format_payload` method of each provider class in `load_test.py`.\\n\\n### Scaling Tests\\n\\nFor large-scale testing, consider:\\n\\n1. **Increasing run time**: Set longer test durations for more stable results\\n ```python\\n base_config[\\"run-time\\"] = \\"300s\\" # 5 minutes per test\\n ```\\n\\n2. **Testing with more concurrent users**: Add higher concurrency levels\\n ```python\\n concurrent_users = [1, 10, 50, 100, 200]\\n ```\\n\\n3. **Using QPS mode**: Instead of fixed users, target a specific query rate\\n ```bash\\n python src/load_test.py --qps 10 --users 100\\n ```\\n\\n## Troubleshooting\\n\\nCommon issues and solutions:\\n\\n1. **Connection errors**: Check your API key and server URL\\n2. **Timeout errors**: Increase timeout settings or reduce concurrency\\n3. **Inconsistent results**: Increase test duration for more stable metrics\\n4. **Missing data in visualizations**: Ensure CSV files contain all expected columns\\n5. **Memory errors**: For large datasets, consider filtering data or using a machine with more memory\\n\\n## API Reference\\n\\n### LLMLoadTest Class\\n\\nThe main class that orchestrates the load tests.\\n\\n**Methods**:\\n- `__init__(base_config, results_dir)`: Initialize with configuration and results directory\\n- `run_test(test_name, users, output_tokens)`: Run a single test with specified parameters\\n- `write_test_report(...)`: Write test results to CSV and JSON files\\n- `generate_report()`: Generate a summary report of all tests\\n\\n### ResultsVisualizer Class\\n\\nGenerates visualizations from test results.\\n\\n**Methods**:\\n- `__init__(results_dir, output_dir)`: Initialize with results and output directories\\n- `generate_all_visualizations()`: Generate all available visualizations\\n- Various plotting methods for specific visualizations\\n\\n## Contributing\\n\\nContributions to the LLM Load Testing Tool are welcome! Here\'s how you can contribute:\\n\\n1. **Bug reports**: Open an issue describing the bug and steps to reproduce\\n2. **Feature requests**: Open an issue describing the desired feature\\n3. **Code contributions**: Fork the repository, make changes, and submit a pull request\\n4. 
**Documentation improvements**: Submit pull requests with documentation updates\\n\\nPlease follow the existing code style and include tests for new features."}', + name: "create_textdoc", }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: "/home/fupfv/git/benchmark1_0701/docs/QUICK_START.md", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - '# LLM Load Testing Tool - Quick Start Guide\n\nThis guide will help you quickly set up and run your first load test with the LLM Load Testing Tool.\n\n## Prerequisites\n\n- Python 3.8 or higher\n- pip (Python package manager)\n- Access to an LLM inference server (or API key for a hosted service)\n\n## Installation\n\n1. Clone the repository (or download and extract the ZIP file):\n\n```bash\ngit clone \ncd llm-load-test\n```\n\n2. Install the required dependencies:\n\n```bash\npip install -r requirements.txt\n```\n\n3. Make the scripts executable (optional, for Linux/macOS):\n\n```bash\nbash make_scripts_executable.sh\n```\n\n## Running Your First Test\n\n### Step 1: Configure the Test\n\nOpen `src/llm_load_test_runner.py` and modify the `base_config` dictionary with your LLM server details:\n\n```python\nbase_config = {\n "host": "https://your-llm-server.com/", # URL of your LLM server\n "provider": "openai", # Provider type (openai, vllm, etc.)\n "model": "your-model-name", # Model name\n "api-key": "your-api-key", # API key (if required)\n "logprobs": 5, # Log probabilities (optional)\n "run-time": "60s", # Duration of each test\n "temperature": 1.0, # Temperature for generation\n}\n```\n\n### Step 2: Run the Test\n\nRun the load test with the default parameters:\n\n```bash\npython src/llm_load_test_runner.py\n```\n\nThis will run tests with:\n- Users: 1, 2, 50 (concurrency levels)\n- Output tokens: 15, 30\n- Run time: 60s per test\n- Prompt tokens: 4046\n- Temperature: 1.0\n\n### Step 3: View the Results\n\nAfter the tests complete, results will be saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\n\n- Individual test results: `results_test_u{users}_o{tokens}.csv`\n- Summary reports: \n - `load_test_report_YYYYMMDD_HHMMSS.csv`\n - `load_test_report_YYYYMMDD_HHMMSS.json`\n\n### Step 4: Generate Visualizations\n\nGenerate visualizations from the test results:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\nVisualizations will be saved in the `results/run_YYYYMMDD_HHMMSS/visualizations/` directory.\n\n## Customizing Your Tests\n\n### Changing Test Parameters\n\nTo customize the test parameters, modify these variables in `llm_load_test_runner.py`:\n\n```python\nconcurrent_users = [1, 2, 5, 10, 20, 50] # Concurrency levels to test\nmax_tokens = [1, 15, 30, 50] # Output token counts to test\nbase_config["run-time"] = "60s" # Duration of each test\n```\n\n### Using a Different Prompt\n\nThe default prompt is defined in `load_test.py`. To use a different prompt, modify the `prompt` variable:\n\n```python\nprompt = """Your custom prompt text here"""\n```\n\n### Testing with Different Providers\n\nThe tool supports various LLM providers. 
To use a different provider, set the `provider` field in `base_config`:\n\n```python\nbase_config = {\n "provider": "vllm", # Change to your desired provider\n # Other configuration...\n}\n```\n\nSupported providers include:\n- `openai`: OpenAI-compatible APIs\n- `vllm`: vLLM server\n- `fireworks`: Fireworks AI\n- `together`: Together AI\n- `triton-infer`: Triton Inference Server\n- `triton-generate`: Triton Generate API\n- `tgi`: Text Generation Inference\n\n## Common Issues and Solutions\n\n### Connection Errors\n\nIf you encounter connection errors:\n- Check that your LLM server is running and accessible\n- Verify that your API key is correct\n- Check for any network restrictions\n\n### Timeout Errors\n\nIf requests are timing out:\n- Increase the timeout settings\n- Reduce the concurrency level\n- Check server resource utilization\n\n### Missing Data in Visualizations\n\nIf visualizations are missing data:\n- Check that the CSV files contain the expected columns\n- Ensure the test completed successfully\n- Try running with fewer concurrent users or shorter output lengths\n\n## Next Steps\n\nFor more detailed information:\n\n- [Full Documentation](docs/README.md): Comprehensive documentation of all features\n- [Visualization Guide](docs/VISUALIZATION_GUIDE.md): Detailed guide to the visualization tools\n- [Developer Guide](docs/DEVELOPER_GUIDE.md): Guide for developers who want to extend the tool\n\n## Getting Help\n\nIf you encounter any issues or have questions:\n\n- Check the [Troubleshooting](docs/README.md#troubleshooting) section in the documentation\n- Open an issue on the GitHub repository\n- Contact the maintainers\n', - file_name_rename: null, - application_details: "", + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: "/home/fupfv/git/benchmark1_0701/docs/README.md", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + '# LLM Load Testing Tool Documentation\n\nWelcome to the comprehensive documentation for the LLM Load Testing Tool. This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\n\n## Table of Contents\n\n1. [Introduction](#introduction)\n2. [Getting Started](#getting-started)\n - [Installation](#installation)\n - [Quick Start](#quick-start)\n3. [Core Components](#core-components)\n4. [Running Tests](#running-tests)\n - [Basic Usage](#basic-usage)\n - [Configuration Options](#configuration-options)\n - [Test Parameters](#test-parameters)\n5. [Understanding Results](#understanding-results)\n - [Report Files](#report-files)\n - [Metrics Explained](#metrics-explained)\n6. [Visualization Tools](#visualization-tools)\n - [Single Run Visualization](#single-run-visualization)\n - [Run Comparison](#run-comparison)\n - [Grafana Dashboard](#grafana-dashboard)\n7. [Advanced Usage](#advanced-usage)\n - [Custom Prompts](#custom-prompts)\n - [Provider-Specific Options](#provider-specific-options)\n - [Scaling Tests](#scaling-tests)\n8. [Troubleshooting](#troubleshooting)\n9. [API Reference](#api-reference)\n10. [Contributing](#contributing)\n\n## Introduction\n\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. 
It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\n\nKey features include:\n- Testing with different concurrency levels and output token counts\n- Comprehensive metrics collection (latency, throughput, token generation speed)\n- Detailed visualization of test results\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\n- Comparison of different test runs and models\n\n## Getting Started\n\n### Installation\n\n1. Clone the repository:\n```bash\ngit clone \ncd llm-load-test\n```\n\n2. Install dependencies:\n```bash\npip install -r requirements.txt\n```\n\n3. Make scripts executable (optional):\n```bash\nbash make_scripts_executable.sh\n```\n\n### Quick Start\n\nRun a basic load test with default parameters:\n\n```bash\npython src/llm_load_test_runner.py\n```\n\nThis will run tests with the default configuration:\n- Users: 1, 2, 50\n- Output tokens: 15, 30\n- Run time: 60s per test\n- Prompt tokens: 4046\n- Temperature: 1.0\n\n## Core Components\n\nThe tool consists of several key components:\n\n1. **llm_load_test_runner.py**: The main script that orchestrates the load tests.\n2. **load_test.py**: The Locust implementation that handles the actual request generation.\n3. **llm_test_logger.py**: Handles logging of test results.\n4. **visualize_results.py**: Generates visualizations from test results.\n5. **compare_runs.py**: Compares results from different test runs.\n6. **dashboard_generator.py**: Creates Grafana dashboard configurations.\n\n## Running Tests\n\n### Basic Usage\n\nThe simplest way to run a test is:\n\n```bash\npython src/llm_load_test_runner.py\n```\n\n### Configuration Options\n\nYou can customize the test by modifying the `base_config` dictionary in `llm_load_test_runner.py`:\n\n```python\nbase_config = {\n "host": "https://your-llm-server.com/",\n "provider": "openai",\n "model": "your-model-name",\n "api-key": "your-api-key",\n "logprobs": 5,\n "run-time": "60s",\n "temperature": 1.0,\n}\n```\n\n### Test Parameters\n\nThe main test parameters you can adjust include:\n\n- **concurrent_users**: List of user counts to test (e.g., `[1, 2, 5, 10, 20, 50]`)\n- **max_tokens**: List of output token counts to test (e.g., `[1, 15, 30, 50]`)\n- **run-time**: Duration of each test (e.g., `"60s"`)\n- **prompt-tokens**: Number of tokens in the prompt (default: 4046)\n- **temperature**: Temperature parameter for generation (default: 1.0)\n\nExample of customizing test parameters:\n\n```python\nconcurrent_users = [1, 5, 10]\nmax_tokens = [10, 50, 100]\nbase_config["run-time"] = "120s"\n```\n\n## Understanding Results\n\n### Report Files\n\nAfter running tests, results are saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\n\n1. **Individual test results**: `results_test_u{users}_o{tokens}.csv`\n2. 
**Summary reports**:\n - `load_test_report_YYYYMMDD_HHMMSS.csv`\n - `load_test_report_YYYYMMDD_HHMMSS.json`\n\n### Metrics Explained\n\nThe test collects and reports the following metrics:\n\n- **Total Latency**: Total time from request to complete response (ms)\n- **Time To First Token**: Time until the first token is received (ms)\n- **Latency Per Token**: Average time to generate each token (ms)\n- **Num Tokens**: Number of tokens generated\n- **Num Requests**: Total number of requests processed\n- **Qps**: Queries per second (throughput)\n- **Prompt Tokens**: Number of tokens in the prompt\n- **Generation Tokens**: Number of tokens requested to generate\n\n## Visualization Tools\n\nThe project includes several visualization tools to help analyze test results.\n\n### Single Run Visualization\n\nGenerate visualizations for a single test run:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\nThis generates the following visualizations:\n- Latency by concurrency level\n- Throughput (QPS) by concurrency level\n- Latency by output token count\n- Heatmap of latency by concurrency and output tokens\n- Latency distribution\n- Token generation speed\n- Summary report\n\n### Run Comparison\n\nCompare results across multiple test runs:\n\n```bash\npython src/compare_runs.py --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\n```\n\nThis generates comparison visualizations showing:\n- Latency metrics comparison across runs\n- Throughput comparison across runs\n- Performance by concurrency level across runs\n- Performance by output token count across runs\n\n### Grafana Dashboard\n\nCreate a Grafana dashboard configuration for real-time monitoring:\n\n```bash\npython src/dashboard_generator.py --output-file grafana-dashboard.json\n```\n\nTo use the dashboard:\n1. Open Grafana in your web browser\n2. Navigate to Dashboards > Import\n3. Upload the generated JSON file or paste its contents\n4. Configure the data source if prompted\n5. Click Import\n\n## Advanced Usage\n\n### Custom Prompts\n\nYou can customize the prompt used for testing by modifying the `prompt` variable in `load_test.py`:\n\n```python\nprompt = """Your custom prompt text here"""\n```\n\nFor more complex scenarios, you can use a file with multiple prompts:\n\n```bash\npython src/llm_load_test_runner.py --prompt-text @prompts.jsonl\n```\n\n### Provider-Specific Options\n\nThe tool supports various LLM providers, each with specific configuration options:\n\n- **OpenAI/Compatible APIs**: Uses the standard OpenAI API format\n- **vLLM**: Optimized for vLLM servers with specific parameters\n- **Fireworks**: Includes Fireworks-specific optimizations\n- **Triton**: Support for Triton Inference Server\n\nProvider-specific options can be set in the `format_payload` method of each provider class in `load_test.py`.\n\n### Scaling Tests\n\nFor large-scale testing, consider:\n\n1. **Increasing run time**: Set longer test durations for more stable results\n ```python\n base_config["run-time"] = "300s" # 5 minutes per test\n ```\n\n2. **Testing with more concurrent users**: Add higher concurrency levels\n ```python\n concurrent_users = [1, 10, 50, 100, 200]\n ```\n\n3. **Using QPS mode**: Instead of fixed users, target a specific query rate\n ```bash\n python src/load_test.py --qps 10 --users 100\n ```\n\n## Troubleshooting\n\nCommon issues and solutions:\n\n1. **Connection errors**: Check your API key and server URL\n2. **Timeout errors**: Increase timeout settings or reduce concurrency\n3. 
**Inconsistent results**: Increase test duration for more stable metrics\n4. **Missing data in visualizations**: Ensure CSV files contain all expected columns\n5. **Memory errors**: For large datasets, consider filtering data or using a machine with more memory\n\n## API Reference\n\n### LLMLoadTest Class\n\nThe main class that orchestrates the load tests.\n\n**Methods**:\n- `__init__(base_config, results_dir)`: Initialize with configuration and results directory\n- `run_test(test_name, users, output_tokens)`: Run a single test with specified parameters\n- `write_test_report(...)`: Write test results to CSV and JSON files\n- `generate_report()`: Generate a summary report of all tests\n\n### ResultsVisualizer Class\n\nGenerates visualizations from test results.\n\n**Methods**:\n- `__init__(results_dir, output_dir)`: Initialize with results and output directories\n- `generate_all_visualizations()`: Generate all available visualizations\n- Various plotting methods for specific visualizations\n\n## Contributing\n\nContributions to the LLM Load Testing Tool are welcome! Here\'s how you can contribute:\n\n1. **Bug reports**: Open an issue describing the bug and steps to reproduce\n2. **Feature requests**: Open an issue describing the desired feature\n3. **Code contributions**: Fork the repository, make changes, and submit a pull request\n4. **Documentation improvements**: Submit pull requests with documentation updates\n\nPlease follow the existing code style and include tests for new features.\n', + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_011nt32gEpSJZet4jGzrVXBt", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nNow, let's create a detailed guide for the visualization tools:", + ftm_tool_calls: [ + { + id: "toolu_01LCqsTPhkZq3mQ7eEz1SAMR", + function: { + arguments: + '{"path": "/home/fupfv/git/benchmark1_0701/docs/VISUALIZATION_GUIDE.md", "content": "# LLM Load Testing Visualization Guide\\n\\nThis guide provides detailed information about the visualization tools available in the LLM Load Testing Tool and how to use them effectively to analyze your benchmark results.\\n\\n## Table of Contents\\n\\n1. [Overview](#overview)\\n2. [Single Run Visualization](#single-run-visualization)\\n - [Usage](#usage)\\n - [Generated Visualizations](#generated-visualizations)\\n - [Interpreting Results](#interpreting-results)\\n3. [Run Comparison](#run-comparison)\\n - [Usage](#usage-1)\\n - [Comparison Metrics](#comparison-metrics)\\n - [Identifying Performance Changes](#identifying-performance-changes)\\n4. [Grafana Dashboard](#grafana-dashboard)\\n - [Setup](#setup)\\n - [Dashboard Panels](#dashboard-panels)\\n - [Real-time Monitoring](#real-time-monitoring)\\n5. [Custom Visualizations](#custom-visualizations)\\n6. [Best Practices](#best-practices)\\n7. [Troubleshooting](#troubleshooting)\\n\\n## Overview\\n\\nThe LLM Load Testing Tool includes three main visualization components:\\n\\n1. **visualize_results.py**: Generates comprehensive visualizations for a single test run\\n2. **compare_runs.py**: Compares results across multiple test runs\\n3. 
**dashboard_generator.py**: Creates Grafana dashboard configurations for real-time monitoring\\n\\nThese tools help you understand the performance characteristics of your LLM inference server under different load conditions and identify potential bottlenecks or optimization opportunities.\\n\\n## Single Run Visualization\\n\\nThe `visualize_results.py` script analyzes the results of a single test run and generates various charts and visualizations.\\n\\n### Usage\\n\\nBasic usage:\\n\\n```bash\\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\\n```\\n\\nOptions:\\n- `--results-dir`: Directory containing test results (default: results)\\n- `--output-dir`: Directory to save visualizations (default: results_dir/visualizations)\\n- `--run`: Specific run directory to analyze (default: analyze all runs)\\n\\n### Generated Visualizations\\n\\nThe script generates the following visualizations:\\n\\n#### 1. Latency by Concurrency Level\\n\\n![Latency by Concurrency](example_images/latency_by_concurrency.png)\\n\\nThis chart shows how different latency metrics (Total Latency, Time To First Token, and Latency Per Token) change as the number of concurrent users increases. It helps identify how your server\'s performance scales with load.\\n\\n#### 2. Throughput by Concurrency Level\\n\\n![Throughput by Concurrency](example_images/throughput_by_concurrency.png)\\n\\nThis bar chart displays the Queries Per Second (QPS) achieved at different concurrency levels. It helps determine the optimal concurrency level for maximum throughput.\\n\\n#### 3. Latency by Output Token Count\\n\\n![Latency by Output Tokens](example_images/latency_by_output_tokens.png)\\n\\nThis chart shows how latency metrics change with different output token counts. It helps understand the relationship between response size and latency.\\n\\n#### 4. Heatmap of Latency\\n\\n![Latency Heatmap](example_images/heatmap_latency.png)\\n\\nThis heatmap visualizes latency across different combinations of concurrency levels and output token counts. Darker colors typically indicate higher latency.\\n\\n#### 5. Latency Distribution\\n\\n![Latency Distribution](example_images/latency_distribution.png)\\n\\nThis histogram shows the distribution of total latency values, including mean and median lines. It helps identify outliers and understand the variability in response times.\\n\\n#### 6. Token Generation Speed\\n\\n![Token Generation Speed](example_images/token_generation_speed.png)\\n\\nThis chart shows the token generation speed (tokens per second) at different concurrency levels. It helps understand how token generation throughput scales with load.\\n\\n#### 7. Summary Report\\n\\nA markdown file containing key statistics and findings from the analysis, including:\\n- Total tests analyzed\\n- Average and median latency\\n- Average throughput\\n- Maximum concurrency tested\\n\\n### Interpreting Results\\n\\nWhen analyzing the visualizations, look for:\\n\\n1. **Scaling patterns**: How does latency increase with concurrency? Is there a point where throughput plateaus or decreases?\\n\\n2. **Bottlenecks**: Are there specific concurrency levels or token counts where performance degrades significantly?\\n\\n3. **Variability**: Is there high variance in latency? This might indicate inconsistent performance.\\n\\n4. **Token efficiency**: How does the token generation speed change with load? 
This indicates the model\'s efficiency under pressure.\\n\\n## Run Comparison\\n\\nThe `compare_runs.py` script compares results from different test runs to identify performance differences, regressions, or improvements.\\n\\n### Usage\\n\\nBasic usage:\\n\\n```bash\\npython src/compare_runs.py --base-dir results --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\\n```\\n\\nOptions:\\n- `--base-dir`: Base directory containing run directories (default: results)\\n- `--runs`: Specific run directories to compare (default: all runs)\\n- `--output-dir`: Directory to save comparison visualizations\\n\\n### Comparison Metrics\\n\\nThe script generates comparison visualizations for:\\n\\n#### 1. Latency Comparison\\n\\n![Latency Comparison](example_images/latency_comparison.png)\\n\\nThis chart compares total latency across different runs, helping identify performance improvements or regressions.\\n\\n#### 2. Throughput Comparison\\n\\n![Throughput Comparison](example_images/throughput_comparison.png)\\n\\nThis chart compares QPS across different runs, showing how throughput has changed.\\n\\n#### 3. Performance by Concurrency Level\\n\\n![Performance by Concurrency](example_images/performance_by_concurrency.png)\\n\\nThis chart shows how performance at different concurrency levels has changed across runs.\\n\\n#### 4. Performance by Output Token Count\\n\\n![Performance by Tokens](example_images/performance_by_tokens.png)\\n\\nThis chart shows how performance with different output token counts has changed across runs.\\n\\n#### 5. Summary Table\\n\\nA table showing key metrics for each run and the percentage change between runs.\\n\\n### Identifying Performance Changes\\n\\nWhen comparing runs, look for:\\n\\n1. **Consistent improvements**: Are latency reductions consistent across all concurrency levels and token counts?\\n\\n2. **Regression points**: Are there specific scenarios where performance has degraded?\\n\\n3. **Scaling changes**: Has the scaling behavior changed? For example, does the new version handle high concurrency better?\\n\\n4. **Throughput improvements**: Has the maximum achievable QPS increased?\\n\\n## Grafana Dashboard\\n\\nThe `dashboard_generator.py` script creates a Grafana dashboard configuration for real-time monitoring of load tests.\\n\\n### Setup\\n\\n1. Generate the dashboard configuration:\\n\\n```bash\\npython src/dashboard_generator.py --output-file grafana-dashboard.json\\n```\\n\\n2. Import into Grafana:\\n - Open Grafana in your web browser\\n - Navigate to Dashboards > Import\\n - Upload the generated JSON file or paste its contents\\n - Configure the data source if prompted\\n - Click Import\\n\\n### Dashboard Panels\\n\\nThe generated dashboard includes panels for:\\n\\n#### 1. Latency Metrics\\n\\nReal-time graphs of:\\n- Total Latency\\n- Time To First Token\\n- Latency Per Token\\n\\n#### 2. Throughput Metrics\\n\\nReal-time graphs of:\\n- Queries Per Second (QPS)\\n- Requests per minute\\n\\n#### 3. Token Metrics\\n\\nReal-time graphs of:\\n- Tokens generated\\n- Token generation speed (tokens/second)\\n\\n#### 4. Key Statistics\\n\\nStat panels showing:\\n- Average latency\\n- P95 latency\\n- Maximum QPS\\n- Success rate\\n\\n### Real-time Monitoring\\n\\nThe Grafana dashboard is particularly useful for:\\n\\n1. **Long-running tests**: Monitor performance over extended periods\\n\\n2. **Stability testing**: Identify performance degradation over time\\n\\n3. **Threshold alerts**: Set up alerts when metrics exceed thresholds\\n\\n4. 
**Sharing results**: Provide stakeholders with a live view of performance\\n\\n## Custom Visualizations\\n\\nYou can extend the visualization capabilities by modifying the existing scripts or creating new ones.\\n\\nTo add a new visualization to `visualize_results.py`:\\n\\n1. Add a new plotting method to the `ResultsVisualizer` class:\\n\\n```python\\ndef plot_custom_metric(self):\\n \\"\\"\\"Plot a custom metric visualization.\\"\\"\\"\\n plt.figure(figsize=(12, 8))\\n \\n # Your plotting code here\\n \\n plt.title(\'Custom Metric Visualization\')\\n plt.xlabel(\'X Label\')\\n plt.ylabel(\'Y Label\')\\n plt.grid(True)\\n plt.tight_layout()\\n plt.savefig(self.output_dir / \'custom_metric.png\')\\n plt.close()\\n```\\n\\n2. Add your new method to the `generate_all_visualizations` method:\\n\\n```python\\ndef generate_all_visualizations(self):\\n # Existing visualizations\\n self.plot_latency_by_concurrency()\\n # ...\\n \\n # Add your custom visualization\\n self.plot_custom_metric()\\n \\n self.generate_summary_report()\\n```\\n\\n## Best Practices\\n\\nFor effective visualization and analysis:\\n\\n1. **Consistent test parameters**: When comparing runs, ensure that the test parameters (prompt tokens, output tokens, etc.) are consistent.\\n\\n2. **Baseline comparison**: Always include a baseline run when comparing performance improvements.\\n\\n3. **Isolate variables**: When testing for specific improvements, change only one variable at a time.\\n\\n4. **Sufficient sample size**: Run tests long enough to collect a statistically significant number of samples.\\n\\n5. **Annotate changes**: Document what changed between different test runs for easier interpretation.\\n\\n6. **Regular benchmarking**: Establish a regular benchmarking cadence to track performance over time.\\n\\n7. **Export raw data**: Save raw data for custom analysis in other tools if needed.\\n\\n## Troubleshooting\\n\\nCommon visualization issues and solutions:\\n\\n1. **Missing data**: If visualizations show missing data, check that the CSV files contain the expected columns.\\n\\n2. **Pandas errors**: Ensure you have the latest version of pandas installed (`pip install --upgrade pandas`).\\n\\n3. **Matplotlib issues**: If plots don\'t render correctly, try updating matplotlib (`pip install --upgrade matplotlib`).\\n\\n4. **File not found**: Verify that the specified directories exist and contain the expected data files.\\n\\n5. **Memory errors**: For very large datasets, consider filtering the data or using a machine with more memory.\\n\\n6. **Inconsistent scales**: If comparing runs with vastly different performance characteristics, consider using log scales or normalizing the data.\\n\\n7. **Empty visualizations**: If a visualization is empty, check if the required data is present in the results files."}', + name: "create_textdoc", }, - ], - tool_call_id: "toolu_01EpCs5Tzt8XiiQnKzmo7TnN", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\n\n\nLet's create a documentation index file to tie everything together:", - ftm_tool_calls: [ - { - id: "toolu_01XAkFWtmSdgwToJLQQaq2nZ", - function: { - arguments: - '{"path": "/home/fupfv/git/benchmark1_0701/docs/index.md", "content": "# LLM Load Testing Tool Documentation\\n\\nWelcome to the documentation for the LLM Load Testing Tool. 
This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\\n\\n## Documentation Sections\\n\\n- [Quick Start Guide](QUICK_START.md): Get up and running quickly\\n- [Comprehensive Documentation](README.md): Detailed information about all features\\n- [Visualization Guide](VISUALIZATION_GUIDE.md): In-depth guide to the visualization tools\\n- [Developer Guide](DEVELOPER_GUIDE.md): Technical information for developers\\n\\n## About the Tool\\n\\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\\n\\nKey features include:\\n- Testing with different concurrency levels and output token counts\\n- Comprehensive metrics collection (latency, throughput, token generation speed)\\n- Detailed visualization of test results\\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\\n- Comparison of different test runs and models\\n\\n## Getting Started\\n\\nIf you\'re new to the tool, we recommend starting with the [Quick Start Guide](QUICK_START.md) to get up and running quickly. Once you\'re familiar with the basics, you can explore the more detailed documentation to learn about advanced features and customization options.\\n\\n## Support\\n\\nIf you encounter any issues or have questions about the tool, please:\\n- Check the [Troubleshooting](README.md#troubleshooting) section in the documentation\\n- Open an issue on the GitHub repository\\n- Contact the maintainers\\n\\n## Contributing\\n\\nContributions to the LLM Load Testing Tool are welcome! Please see the [Contributing Guidelines](DEVELOPER_GUIDE.md#contributing-guidelines) for more information on how to contribute.\\n\\n## License\\n\\nThis project is licensed under the terms of the LICENSE file included in the repository."}', - name: "create_textdoc", - }, - type: "function", - index: 0, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: + "/home/fupfv/git/benchmark1_0701/docs/VISUALIZATION_GUIDE.md", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + "# LLM Load Testing Visualization Guide\n\nThis guide provides detailed information about the visualization tools available in the LLM Load Testing Tool and how to use them effectively to analyze your benchmark results.\n\n## Table of Contents\n\n1. [Overview](#overview)\n2. [Single Run Visualization](#single-run-visualization)\n - [Usage](#usage)\n - [Generated Visualizations](#generated-visualizations)\n - [Interpreting Results](#interpreting-results)\n3. [Run Comparison](#run-comparison)\n - [Usage](#usage-1)\n - [Comparison Metrics](#comparison-metrics)\n - [Identifying Performance Changes](#identifying-performance-changes)\n4. [Grafana Dashboard](#grafana-dashboard)\n - [Setup](#setup)\n - [Dashboard Panels](#dashboard-panels)\n - [Real-time Monitoring](#real-time-monitoring)\n5. [Custom Visualizations](#custom-visualizations)\n6. [Best Practices](#best-practices)\n7. [Troubleshooting](#troubleshooting)\n\n## Overview\n\nThe LLM Load Testing Tool includes three main visualization components:\n\n1. **visualize_results.py**: Generates comprehensive visualizations for a single test run\n2. **compare_runs.py**: Compares results across multiple test runs\n3. 
**dashboard_generator.py**: Creates Grafana dashboard configurations for real-time monitoring\n\nThese tools help you understand the performance characteristics of your LLM inference server under different load conditions and identify potential bottlenecks or optimization opportunities.\n\n## Single Run Visualization\n\nThe `visualize_results.py` script analyzes the results of a single test run and generates various charts and visualizations.\n\n### Usage\n\nBasic usage:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\nOptions:\n- `--results-dir`: Directory containing test results (default: results)\n- `--output-dir`: Directory to save visualizations (default: results_dir/visualizations)\n- `--run`: Specific run directory to analyze (default: analyze all runs)\n\n### Generated Visualizations\n\nThe script generates the following visualizations:\n\n#### 1. Latency by Concurrency Level\n\n![Latency by Concurrency](example_images/latency_by_concurrency.png)\n\nThis chart shows how different latency metrics (Total Latency, Time To First Token, and Latency Per Token) change as the number of concurrent users increases. It helps identify how your server's performance scales with load.\n\n#### 2. Throughput by Concurrency Level\n\n![Throughput by Concurrency](example_images/throughput_by_concurrency.png)\n\nThis bar chart displays the Queries Per Second (QPS) achieved at different concurrency levels. It helps determine the optimal concurrency level for maximum throughput.\n\n#### 3. Latency by Output Token Count\n\n![Latency by Output Tokens](example_images/latency_by_output_tokens.png)\n\nThis chart shows how latency metrics change with different output token counts. It helps understand the relationship between response size and latency.\n\n#### 4. Heatmap of Latency\n\n![Latency Heatmap](example_images/heatmap_latency.png)\n\nThis heatmap visualizes latency across different combinations of concurrency levels and output token counts. Darker colors typically indicate higher latency.\n\n#### 5. Latency Distribution\n\n![Latency Distribution](example_images/latency_distribution.png)\n\nThis histogram shows the distribution of total latency values, including mean and median lines. It helps identify outliers and understand the variability in response times.\n\n#### 6. Token Generation Speed\n\n![Token Generation Speed](example_images/token_generation_speed.png)\n\nThis chart shows the token generation speed (tokens per second) at different concurrency levels. It helps understand how token generation throughput scales with load.\n\n#### 7. Summary Report\n\nA markdown file containing key statistics and findings from the analysis, including:\n- Total tests analyzed\n- Average and median latency\n- Average throughput\n- Maximum concurrency tested\n\n### Interpreting Results\n\nWhen analyzing the visualizations, look for:\n\n1. **Scaling patterns**: How does latency increase with concurrency? Is there a point where throughput plateaus or decreases?\n\n2. **Bottlenecks**: Are there specific concurrency levels or token counts where performance degrades significantly?\n\n3. **Variability**: Is there high variance in latency? This might indicate inconsistent performance.\n\n4. **Token efficiency**: How does the token generation speed change with load? 
This indicates the model's efficiency under pressure.\n\n## Run Comparison\n\nThe `compare_runs.py` script compares results from different test runs to identify performance differences, regressions, or improvements.\n\n### Usage\n\nBasic usage:\n\n```bash\npython src/compare_runs.py --base-dir results --runs run_YYYYMMDD_HHMMSS1 run_YYYYMMDD_HHMMSS2\n```\n\nOptions:\n- `--base-dir`: Base directory containing run directories (default: results)\n- `--runs`: Specific run directories to compare (default: all runs)\n- `--output-dir`: Directory to save comparison visualizations\n\n### Comparison Metrics\n\nThe script generates comparison visualizations for:\n\n#### 1. Latency Comparison\n\n![Latency Comparison](example_images/latency_comparison.png)\n\nThis chart compares total latency across different runs, helping identify performance improvements or regressions.\n\n#### 2. Throughput Comparison\n\n![Throughput Comparison](example_images/throughput_comparison.png)\n\nThis chart compares QPS across different runs, showing how throughput has changed.\n\n#### 3. Performance by Concurrency Level\n\n![Performance by Concurrency](example_images/performance_by_concurrency.png)\n\nThis chart shows how performance at different concurrency levels has changed across runs.\n\n#### 4. Performance by Output Token Count\n\n![Performance by Tokens](example_images/performance_by_tokens.png)\n\nThis chart shows how performance with different output token counts has changed across runs.\n\n#### 5. Summary Table\n\nA table showing key metrics for each run and the percentage change between runs.\n\n### Identifying Performance Changes\n\nWhen comparing runs, look for:\n\n1. **Consistent improvements**: Are latency reductions consistent across all concurrency levels and token counts?\n\n2. **Regression points**: Are there specific scenarios where performance has degraded?\n\n3. **Scaling changes**: Has the scaling behavior changed? For example, does the new version handle high concurrency better?\n\n4. **Throughput improvements**: Has the maximum achievable QPS increased?\n\n## Grafana Dashboard\n\nThe `dashboard_generator.py` script creates a Grafana dashboard configuration for real-time monitoring of load tests.\n\n### Setup\n\n1. Generate the dashboard configuration:\n\n```bash\npython src/dashboard_generator.py --output-file grafana-dashboard.json\n```\n\n2. Import into Grafana:\n - Open Grafana in your web browser\n - Navigate to Dashboards > Import\n - Upload the generated JSON file or paste its contents\n - Configure the data source if prompted\n - Click Import\n\n### Dashboard Panels\n\nThe generated dashboard includes panels for:\n\n#### 1. Latency Metrics\n\nReal-time graphs of:\n- Total Latency\n- Time To First Token\n- Latency Per Token\n\n#### 2. Throughput Metrics\n\nReal-time graphs of:\n- Queries Per Second (QPS)\n- Requests per minute\n\n#### 3. Token Metrics\n\nReal-time graphs of:\n- Tokens generated\n- Token generation speed (tokens/second)\n\n#### 4. Key Statistics\n\nStat panels showing:\n- Average latency\n- P95 latency\n- Maximum QPS\n- Success rate\n\n### Real-time Monitoring\n\nThe Grafana dashboard is particularly useful for:\n\n1. **Long-running tests**: Monitor performance over extended periods\n\n2. **Stability testing**: Identify performance degradation over time\n\n3. **Threshold alerts**: Set up alerts when metrics exceed thresholds\n\n4. 
**Sharing results**: Provide stakeholders with a live view of performance\n\n## Custom Visualizations\n\nYou can extend the visualization capabilities by modifying the existing scripts or creating new ones.\n\nTo add a new visualization to `visualize_results.py`:\n\n1. Add a new plotting method to the `ResultsVisualizer` class:\n\n```python\ndef plot_custom_metric(self):\n \"\"\"Plot a custom metric visualization.\"\"\"\n plt.figure(figsize=(12, 8))\n \n # Your plotting code here\n \n plt.title('Custom Metric Visualization')\n plt.xlabel('X Label')\n plt.ylabel('Y Label')\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(self.output_dir / 'custom_metric.png')\n plt.close()\n```\n\n2. Add your new method to the `generate_all_visualizations` method:\n\n```python\ndef generate_all_visualizations(self):\n # Existing visualizations\n self.plot_latency_by_concurrency()\n # ...\n \n # Add your custom visualization\n self.plot_custom_metric()\n \n self.generate_summary_report()\n```\n\n## Best Practices\n\nFor effective visualization and analysis:\n\n1. **Consistent test parameters**: When comparing runs, ensure that the test parameters (prompt tokens, output tokens, etc.) are consistent.\n\n2. **Baseline comparison**: Always include a baseline run when comparing performance improvements.\n\n3. **Isolate variables**: When testing for specific improvements, change only one variable at a time.\n\n4. **Sufficient sample size**: Run tests long enough to collect a statistically significant number of samples.\n\n5. **Annotate changes**: Document what changed between different test runs for easier interpretation.\n\n6. **Regular benchmarking**: Establish a regular benchmarking cadence to track performance over time.\n\n7. **Export raw data**: Save raw data for custom analysis in other tools if needed.\n\n## Troubleshooting\n\nCommon visualization issues and solutions:\n\n1. **Missing data**: If visualizations show missing data, check that the CSV files contain the expected columns.\n\n2. **Pandas errors**: Ensure you have the latest version of pandas installed (`pip install --upgrade pandas`).\n\n3. **Matplotlib issues**: If plots don't render correctly, try updating matplotlib (`pip install --upgrade matplotlib`).\n\n4. **File not found**: Verify that the specified directories exist and contain the expected data files.\n\n5. **Memory errors**: For very large datasets, consider filtering the data or using a machine with more memory.\n\n6. **Inconsistent scales**: If comparing runs with vastly different performance characteristics, consider using log scales or normalizing the data.\n\n7. **Empty visualizations**: If a visualization is empty, check if the required data is present in the results files.\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01LCqsTPhkZq3mQ7eEz1SAMR", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\n\n\nNow, let's create a technical guide for developers who want to extend the tool:", + ftm_tool_calls: [ + { + id: "toolu_018ik5qU9MGsUpZY4Bn5f33o", + function: { + arguments: + '{"path": "/home/fupfv/git/benchmark1_0701/docs/DEVELOPER_GUIDE.md", "content": "# LLM Load Testing Tool Developer Guide\\n\\nThis guide is intended for developers who want to extend, modify, or contribute to the LLM Load Testing Tool. It provides technical details about the codebase, architecture, and guidelines for development.\\n\\n## Table of Contents\\n\\n1. [Architecture Overview](#architecture-overview)\\n2. [Core Components](#core-components)\\n3. 
[Adding New Features](#adding-new-features)\\n - [Adding a New Provider](#adding-a-new-provider)\\n - [Adding New Metrics](#adding-new-metrics)\\n - [Adding New Visualizations](#adding-new-visualizations)\\n4. [Testing Guidelines](#testing-guidelines)\\n5. [Code Style and Conventions](#code-style-and-conventions)\\n6. [Contributing Guidelines](#contributing-guidelines)\\n\\n## Architecture Overview\\n\\nThe LLM Load Testing Tool is built with a modular architecture that separates the concerns of test execution, data collection, and visualization. The main components are:\\n\\n1. **Test Runner**: Orchestrates the execution of load tests with different parameters.\\n2. **Load Test Implementation**: Uses Locust to generate load and collect metrics.\\n3. **Results Processing**: Parses and processes the raw test results.\\n4. **Visualization Tools**: Generate charts and reports from the processed results.\\n\\nThe data flow through the system is as follows:\\n\\n```\\nTest Configuration → Test Runner → Load Test Implementation → Raw Results → Results Processing → Visualizations\\n```\\n\\n## Core Components\\n\\n### 1. llm_load_test_runner.py\\n\\nThis is the main entry point for running load tests. It:\\n- Configures test parameters\\n- Creates a results directory\\n- Runs tests with different combinations of users and output tokens\\n- Generates summary reports\\n\\nKey classes and methods:\\n- `LLMLoadTest`: Main class for orchestrating tests\\n - `run_test(test_name, users, output_tokens)`: Runs a single test\\n - `write_test_report(...)`: Writes test results to files\\n - `parse_output(output)`: Parses metrics from test output\\n - `generate_report()`: Generates a summary report\\n\\n### 2. load_test.py\\n\\nThis file contains the Locust implementation for generating load. It:\\n- Defines user behavior for load testing\\n- Implements different provider classes for various LLM APIs\\n- Collects and reports metrics\\n\\nKey classes:\\n- `LLMUser`: Locust user class that sends requests to the LLM server\\n- `BaseProvider`: Abstract base class for LLM providers\\n - `OpenAIProvider`, `VllmProvider`, etc.: Provider-specific implementations\\n- `LengthSampler`: Utility for sampling token lengths\\n- `FixedQPSPacer`: Utility for controlling request rate\\n\\n### 3. llm_test_logger.py\\n\\nHandles logging of test results and details.\\n\\n### 4. visualize_results.py\\n\\nGenerates visualizations from test results. Key components:\\n- `ResultsVisualizer`: Main class for generating visualizations\\n - Various plotting methods for different metrics\\n - `generate_all_visualizations()`: Generates all visualizations\\n\\n### 5. compare_runs.py\\n\\nCompares results from different test runs.\\n\\n### 6. dashboard_generator.py\\n\\nGenerates Grafana dashboard configurations.\\n\\n## Adding New Features\\n\\n### Adding a New Provider\\n\\nTo add support for a new LLM provider:\\n\\n1. 
Create a new provider class in `load_test.py` that inherits from `BaseProvider`:\\n\\n```python\\nclass NewProvider(BaseProvider):\\n DEFAULT_MODEL_NAME = \\"default-model-name\\" # Optional default model name\\n \\n def get_url(self):\\n \\"\\"\\"Return the API endpoint URL.\\"\\"\\"\\n return \\"/api/endpoint\\"\\n \\n def format_payload(self, prompt, max_tokens, images):\\n \\"\\"\\"Format the request payload for this provider.\\"\\"\\"\\n data = {\\n \\"model\\": self.model,\\n \\"prompt\\": prompt,\\n \\"max_tokens\\": max_tokens,\\n # Provider-specific parameters\\n \\"provider_param\\": \\"value\\"\\n }\\n return data\\n \\n def parse_output_json(self, data, prompt):\\n \\"\\"\\"Parse the response from this provider.\\"\\"\\"\\n # Extract text, token counts, etc.\\n text = data.get(\\"output\\", \\"\\")\\n tokens = data.get(\\"token_count\\", 0)\\n \\n return ChunkMetadata(\\n text=text,\\n logprob_tokens=None,\\n usage_tokens=tokens,\\n prompt_usage_tokens=None\\n )\\n```\\n\\n2. Add the provider to the `PROVIDER_CLASS_MAP` dictionary:\\n\\n```python\\nPROVIDER_CLASS_MAP = {\\n # Existing providers\\n \\"openai\\": OpenAIProvider,\\n \\"vllm\\": VllmProvider,\\n # Add your new provider\\n \\"new_provider\\": NewProvider,\\n}\\n```\\n\\n### Adding New Metrics\\n\\nTo add a new metric to track:\\n\\n1. Modify the `LLMUser.generate_text` method in `load_test.py` to collect the new metric:\\n\\n```python\\n@task\\ndef generate_text(self):\\n # Existing code...\\n \\n # Add your new metric calculation\\n new_metric_value = calculate_new_metric(response, prompt)\\n \\n # Add the custom metric\\n add_custom_metric(\\"new_metric_name\\", new_metric_value)\\n```\\n\\n2. Update the `write_test_report` method in `llm_load_test_runner.py` to include the new metric:\\n\\n```python\\ndef write_test_report(self, test_name, response_text, duration, exit_code, prompt_tokens, provider=None, model=None, generation_tokens=None, stream=None, temperature=None, logprobs=None, concurrency=None, time_to_first_token=None, latency_per_token=None, num_tokens=None, total_latency=None, num_requests=None, qps=None, new_metric=None):\\n # Add the new metric to report_data\\n report_data = {\\n # Existing fields...\\n \\"New Metric\\": new_metric,\\n }\\n \\n # Update CSV writer to include the new field\\n writer.writerow([\\"Response received\\", ..., \\"New Metric\\", ...])\\n writer.writerow([response_text, ..., new_metric, ...])\\n```\\n\\n### Adding New Visualizations\\n\\nTo add a new visualization:\\n\\n1. Add a new plotting method to the `ResultsVisualizer` class in `visualize_results.py`:\\n\\n```python\\ndef plot_new_visualization(self):\\n \\"\\"\\"Plot a new visualization.\\"\\"\\"\\n if \'required_column\' not in self.data.columns:\\n print(\\"Required column not found for new visualization\\")\\n return\\n \\n plt.figure(figsize=(12, 8))\\n \\n # Your plotting code here\\n sns.lineplot(x=\'Concurrency\', y=\'new_metric\', data=self.data)\\n \\n plt.title(\'New Metric Visualization\')\\n plt.xlabel(\'Concurrent Users\')\\n plt.ylabel(\'New Metric\')\\n plt.grid(True)\\n plt.tight_layout()\\n plt.savefig(self.output_dir / \'new_visualization.png\')\\n plt.close()\\n```\\n\\n2. 
Add your new method to the `generate_all_visualizations` method:\\n\\n```python\\ndef generate_all_visualizations(self):\\n # Existing visualizations\\n self.plot_latency_by_concurrency()\\n # ...\\n \\n # Add your new visualization\\n self.plot_new_visualization()\\n \\n self.generate_summary_report()\\n```\\n\\n## Testing Guidelines\\n\\nWhen developing new features or fixing bugs, follow these testing guidelines:\\n\\n1. **Unit Tests**: Write unit tests for new functionality, especially for data processing and parsing logic.\\n\\n2. **Integration Tests**: Test the integration between components, such as running a test and verifying that the results are correctly processed.\\n\\n3. **Manual Testing**: For visualizations and UI components, perform manual testing to ensure they render correctly.\\n\\n4. **Test with Different Providers**: Ensure that new features work with all supported providers.\\n\\n5. **Test with Different Parameters**: Verify that the tool works correctly with different combinations of users, tokens, and other parameters.\\n\\n6. **Error Handling**: Test error cases to ensure the tool gracefully handles failures.\\n\\n## Code Style and Conventions\\n\\nFollow these style guidelines when contributing to the codebase:\\n\\n1. **PEP 8**: Follow the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code.\\n\\n2. **Docstrings**: Use docstrings to document classes, methods, and functions. Follow the [Google style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.\\n\\n3. **Type Hints**: Use type hints to indicate parameter and return types.\\n\\n4. **Naming Conventions**:\\n - Classes: `CamelCase`\\n - Functions and methods: `snake_case`\\n - Variables: `snake_case`\\n - Constants: `UPPER_SNAKE_CASE`\\n\\n5. **Comments**: Add comments to explain complex logic or non-obvious behavior.\\n\\n6. **Imports**: Organize imports in the following order:\\n - Standard library imports\\n - Third-party library imports\\n - Local application imports\\n\\n## Contributing Guidelines\\n\\nTo contribute to the LLM Load Testing Tool:\\n\\n1. **Fork the Repository**: Create a fork of the repository on GitHub.\\n\\n2. **Create a Branch**: Create a branch for your feature or bug fix.\\n\\n3. **Make Changes**: Implement your changes following the code style guidelines.\\n\\n4. **Write Tests**: Add tests for your changes to ensure they work correctly.\\n\\n5. **Update Documentation**: Update the documentation to reflect your changes.\\n\\n6. **Submit a Pull Request**: Submit a pull request with your changes.\\n\\n7. **Code Review**: Address any feedback from the code review.\\n\\n8. 
**Merge**: Once approved, your changes will be merged into the main branch.\\n\\n### Commit Message Guidelines\\n\\nFollow these guidelines for commit messages:\\n\\n- Use the present tense (\\"Add feature\\" not \\"Added feature\\")\\n- Use the imperative mood (\\"Move cursor to...\\" not \\"Moves cursor to...\\")\\n- Limit the first line to 72 characters or less\\n- Reference issues and pull requests liberally after the first line\\n\\nExample:\\n```\\nAdd support for XYZ provider\\n\\nThis commit adds support for the XYZ provider, including:\\n- Implementation of XYZProvider class\\n- Unit tests for XYZ provider\\n- Documentation updates\\n\\nFixes #123\\n```\\n\\nThank you for contributing to the LLM Load Testing Tool!"}', + name: "create_textdoc", }, - ], - finish_reason: "stop", - }, - { - ftm_role: "diff", - ftm_content: [ - { - file_name: "/home/fupfv/git/benchmark1_0701/docs/index.md", - file_action: "edit", - line1: 1, - line2: 1, - lines_remove: "", - lines_add: - "# LLM Load Testing Tool Documentation\n\nWelcome to the documentation for the LLM Load Testing Tool. This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\n\n## Documentation Sections\n\n- [Quick Start Guide](QUICK_START.md): Get up and running quickly\n- [Comprehensive Documentation](README.md): Detailed information about all features\n- [Visualization Guide](VISUALIZATION_GUIDE.md): In-depth guide to the visualization tools\n- [Developer Guide](DEVELOPER_GUIDE.md): Technical information for developers\n\n## About the Tool\n\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\n\nKey features include:\n- Testing with different concurrency levels and output token counts\n- Comprehensive metrics collection (latency, throughput, token generation speed)\n- Detailed visualization of test results\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\n- Comparison of different test runs and models\n\n## Getting Started\n\nIf you're new to the tool, we recommend starting with the [Quick Start Guide](QUICK_START.md) to get up and running quickly. Once you're familiar with the basics, you can explore the more detailed documentation to learn about advanced features and customization options.\n\n## Support\n\nIf you encounter any issues or have questions about the tool, please:\n- Check the [Troubleshooting](README.md#troubleshooting) section in the documentation\n- Open an issue on the GitHub repository\n- Contact the maintainers\n\n## Contributing\n\nContributions to the LLM Load Testing Tool are welcome! Please see the [Contributing Guidelines](DEVELOPER_GUIDE.md#contributing-guidelines) for more information on how to contribute.\n\n## License\n\nThis project is licensed under the terms of the LICENSE file included in the repository.\n", - file_name_rename: null, - application_details: "", + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: "/home/fupfv/git/benchmark1_0701/docs/DEVELOPER_GUIDE.md", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + '# LLM Load Testing Tool Developer Guide\n\nThis guide is intended for developers who want to extend, modify, or contribute to the LLM Load Testing Tool. 
It provides technical details about the codebase, architecture, and guidelines for development.\n\n## Table of Contents\n\n1. [Architecture Overview](#architecture-overview)\n2. [Core Components](#core-components)\n3. [Adding New Features](#adding-new-features)\n - [Adding a New Provider](#adding-a-new-provider)\n - [Adding New Metrics](#adding-new-metrics)\n - [Adding New Visualizations](#adding-new-visualizations)\n4. [Testing Guidelines](#testing-guidelines)\n5. [Code Style and Conventions](#code-style-and-conventions)\n6. [Contributing Guidelines](#contributing-guidelines)\n\n## Architecture Overview\n\nThe LLM Load Testing Tool is built with a modular architecture that separates the concerns of test execution, data collection, and visualization. The main components are:\n\n1. **Test Runner**: Orchestrates the execution of load tests with different parameters.\n2. **Load Test Implementation**: Uses Locust to generate load and collect metrics.\n3. **Results Processing**: Parses and processes the raw test results.\n4. **Visualization Tools**: Generate charts and reports from the processed results.\n\nThe data flow through the system is as follows:\n\n```\nTest Configuration → Test Runner → Load Test Implementation → Raw Results → Results Processing → Visualizations\n```\n\n## Core Components\n\n### 1. llm_load_test_runner.py\n\nThis is the main entry point for running load tests. It:\n- Configures test parameters\n- Creates a results directory\n- Runs tests with different combinations of users and output tokens\n- Generates summary reports\n\nKey classes and methods:\n- `LLMLoadTest`: Main class for orchestrating tests\n - `run_test(test_name, users, output_tokens)`: Runs a single test\n - `write_test_report(...)`: Writes test results to files\n - `parse_output(output)`: Parses metrics from test output\n - `generate_report()`: Generates a summary report\n\n### 2. load_test.py\n\nThis file contains the Locust implementation for generating load. It:\n- Defines user behavior for load testing\n- Implements different provider classes for various LLM APIs\n- Collects and reports metrics\n\nKey classes:\n- `LLMUser`: Locust user class that sends requests to the LLM server\n- `BaseProvider`: Abstract base class for LLM providers\n - `OpenAIProvider`, `VllmProvider`, etc.: Provider-specific implementations\n- `LengthSampler`: Utility for sampling token lengths\n- `FixedQPSPacer`: Utility for controlling request rate\n\n### 3. llm_test_logger.py\n\nHandles logging of test results and details.\n\n### 4. visualize_results.py\n\nGenerates visualizations from test results. Key components:\n- `ResultsVisualizer`: Main class for generating visualizations\n - Various plotting methods for different metrics\n - `generate_all_visualizations()`: Generates all visualizations\n\n### 5. compare_runs.py\n\nCompares results from different test runs.\n\n### 6. dashboard_generator.py\n\nGenerates Grafana dashboard configurations.\n\n## Adding New Features\n\n### Adding a New Provider\n\nTo add support for a new LLM provider:\n\n1. 
Create a new provider class in `load_test.py` that inherits from `BaseProvider`:\n\n```python\nclass NewProvider(BaseProvider):\n DEFAULT_MODEL_NAME = "default-model-name" # Optional default model name\n \n def get_url(self):\n """Return the API endpoint URL."""\n return "/api/endpoint"\n \n def format_payload(self, prompt, max_tokens, images):\n """Format the request payload for this provider."""\n data = {\n "model": self.model,\n "prompt": prompt,\n "max_tokens": max_tokens,\n # Provider-specific parameters\n "provider_param": "value"\n }\n return data\n \n def parse_output_json(self, data, prompt):\n """Parse the response from this provider."""\n # Extract text, token counts, etc.\n text = data.get("output", "")\n tokens = data.get("token_count", 0)\n \n return ChunkMetadata(\n text=text,\n logprob_tokens=None,\n usage_tokens=tokens,\n prompt_usage_tokens=None\n )\n```\n\n2. Add the provider to the `PROVIDER_CLASS_MAP` dictionary:\n\n```python\nPROVIDER_CLASS_MAP = {\n # Existing providers\n "openai": OpenAIProvider,\n "vllm": VllmProvider,\n # Add your new provider\n "new_provider": NewProvider,\n}\n```\n\n### Adding New Metrics\n\nTo add a new metric to track:\n\n1. Modify the `LLMUser.generate_text` method in `load_test.py` to collect the new metric:\n\n```python\n@task\ndef generate_text(self):\n # Existing code...\n \n # Add your new metric calculation\n new_metric_value = calculate_new_metric(response, prompt)\n \n # Add the custom metric\n add_custom_metric("new_metric_name", new_metric_value)\n```\n\n2. Update the `write_test_report` method in `llm_load_test_runner.py` to include the new metric:\n\n```python\ndef write_test_report(self, test_name, response_text, duration, exit_code, prompt_tokens, provider=None, model=None, generation_tokens=None, stream=None, temperature=None, logprobs=None, concurrency=None, time_to_first_token=None, latency_per_token=None, num_tokens=None, total_latency=None, num_requests=None, qps=None, new_metric=None):\n # Add the new metric to report_data\n report_data = {\n # Existing fields...\n "New Metric": new_metric,\n }\n \n # Update CSV writer to include the new field\n writer.writerow(["Response received", ..., "New Metric", ...])\n writer.writerow([response_text, ..., new_metric, ...])\n```\n\n### Adding New Visualizations\n\nTo add a new visualization:\n\n1. Add a new plotting method to the `ResultsVisualizer` class in `visualize_results.py`:\n\n```python\ndef plot_new_visualization(self):\n """Plot a new visualization."""\n if \'required_column\' not in self.data.columns:\n print("Required column not found for new visualization")\n return\n \n plt.figure(figsize=(12, 8))\n \n # Your plotting code here\n sns.lineplot(x=\'Concurrency\', y=\'new_metric\', data=self.data)\n \n plt.title(\'New Metric Visualization\')\n plt.xlabel(\'Concurrent Users\')\n plt.ylabel(\'New Metric\')\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(self.output_dir / \'new_visualization.png\')\n plt.close()\n```\n\n2. Add your new method to the `generate_all_visualizations` method:\n\n```python\ndef generate_all_visualizations(self):\n # Existing visualizations\n self.plot_latency_by_concurrency()\n # ...\n \n # Add your new visualization\n self.plot_new_visualization()\n \n self.generate_summary_report()\n```\n\n## Testing Guidelines\n\nWhen developing new features or fixing bugs, follow these testing guidelines:\n\n1. **Unit Tests**: Write unit tests for new functionality, especially for data processing and parsing logic.\n\n2. 
**Integration Tests**: Test the integration between components, such as running a test and verifying that the results are correctly processed.\n\n3. **Manual Testing**: For visualizations and UI components, perform manual testing to ensure they render correctly.\n\n4. **Test with Different Providers**: Ensure that new features work with all supported providers.\n\n5. **Test with Different Parameters**: Verify that the tool works correctly with different combinations of users, tokens, and other parameters.\n\n6. **Error Handling**: Test error cases to ensure the tool gracefully handles failures.\n\n## Code Style and Conventions\n\nFollow these style guidelines when contributing to the codebase:\n\n1. **PEP 8**: Follow the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code.\n\n2. **Docstrings**: Use docstrings to document classes, methods, and functions. Follow the [Google style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.\n\n3. **Type Hints**: Use type hints to indicate parameter and return types.\n\n4. **Naming Conventions**:\n - Classes: `CamelCase`\n - Functions and methods: `snake_case`\n - Variables: `snake_case`\n - Constants: `UPPER_SNAKE_CASE`\n\n5. **Comments**: Add comments to explain complex logic or non-obvious behavior.\n\n6. **Imports**: Organize imports in the following order:\n - Standard library imports\n - Third-party library imports\n - Local application imports\n\n## Contributing Guidelines\n\nTo contribute to the LLM Load Testing Tool:\n\n1. **Fork the Repository**: Create a fork of the repository on GitHub.\n\n2. **Create a Branch**: Create a branch for your feature or bug fix.\n\n3. **Make Changes**: Implement your changes following the code style guidelines.\n\n4. **Write Tests**: Add tests for your changes to ensure they work correctly.\n\n5. **Update Documentation**: Update the documentation to reflect your changes.\n\n6. **Submit a Pull Request**: Submit a pull request with your changes.\n\n7. **Code Review**: Address any feedback from the code review.\n\n8. **Merge**: Once approved, your changes will be merged into the main branch.\n\n### Commit Message Guidelines\n\nFollow these guidelines for commit messages:\n\n- Use the present tense ("Add feature" not "Added feature")\n- Use the imperative mood ("Move cursor to..." 
not "Moves cursor to...")\n- Limit the first line to 72 characters or less\n- Reference issues and pull requests liberally after the first line\n\nExample:\n```\nAdd support for XYZ provider\n\nThis commit adds support for the XYZ provider, including:\n- Implementation of XYZProvider class\n- Unit tests for XYZ provider\n- Documentation updates\n\nFixes #123\n```\n\nThank you for contributing to the LLM Load Testing Tool!\n', + file_name_rename: null, + application_details: "", + }, + ], + ftm_call_id: "toolu_018ik5qU9MGsUpZY4Bn5f33o", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\n\n\nFinally, let's create a quick start guide for users who want to get started quickly:", + ftm_tool_calls: [ + { + id: "toolu_01EpCs5Tzt8XiiQnKzmo7TnN", + function: { + arguments: + '{"path": "/home/fupfv/git/benchmark1_0701/docs/QUICK_START.md", "content": "# LLM Load Testing Tool - Quick Start Guide\\n\\nThis guide will help you quickly set up and run your first load test with the LLM Load Testing Tool.\\n\\n## Prerequisites\\n\\n- Python 3.8 or higher\\n- pip (Python package manager)\\n- Access to an LLM inference server (or API key for a hosted service)\\n\\n## Installation\\n\\n1. Clone the repository (or download and extract the ZIP file):\\n\\n```bash\\ngit clone \\ncd llm-load-test\\n```\\n\\n2. Install the required dependencies:\\n\\n```bash\\npip install -r requirements.txt\\n```\\n\\n3. Make the scripts executable (optional, for Linux/macOS):\\n\\n```bash\\nbash make_scripts_executable.sh\\n```\\n\\n## Running Your First Test\\n\\n### Step 1: Configure the Test\\n\\nOpen `src/llm_load_test_runner.py` and modify the `base_config` dictionary with your LLM server details:\\n\\n```python\\nbase_config = {\\n \\"host\\": \\"https://your-llm-server.com/\\", # URL of your LLM server\\n \\"provider\\": \\"openai\\", # Provider type (openai, vllm, etc.)\\n \\"model\\": \\"your-model-name\\", # Model name\\n \\"api-key\\": \\"your-api-key\\", # API key (if required)\\n \\"logprobs\\": 5, # Log probabilities (optional)\\n \\"run-time\\": \\"60s\\", # Duration of each test\\n \\"temperature\\": 1.0, # Temperature for generation\\n}\\n```\\n\\n### Step 2: Run the Test\\n\\nRun the load test with the default parameters:\\n\\n```bash\\npython src/llm_load_test_runner.py\\n```\\n\\nThis will run tests with:\\n- Users: 1, 2, 50 (concurrency levels)\\n- Output tokens: 15, 30\\n- Run time: 60s per test\\n- Prompt tokens: 4046\\n- Temperature: 1.0\\n\\n### Step 3: View the Results\\n\\nAfter the tests complete, results will be saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\\n\\n- Individual test results: `results_test_u{users}_o{tokens}.csv`\\n- Summary reports: \\n - `load_test_report_YYYYMMDD_HHMMSS.csv`\\n - `load_test_report_YYYYMMDD_HHMMSS.json`\\n\\n### Step 4: Generate Visualizations\\n\\nGenerate visualizations from the test results:\\n\\n```bash\\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\\n```\\n\\nVisualizations will be saved in the `results/run_YYYYMMDD_HHMMSS/visualizations/` directory.\\n\\n## Customizing Your Tests\\n\\n### Changing Test Parameters\\n\\nTo customize the test parameters, modify these variables in `llm_load_test_runner.py`:\\n\\n```python\\nconcurrent_users = [1, 2, 5, 10, 20, 50] # Concurrency levels to test\\nmax_tokens = [1, 15, 30, 50] # Output token counts to test\\nbase_config[\\"run-time\\"] = \\"60s\\" # Duration of each test\\n```\\n\\n### Using a Different Prompt\\n\\nThe default prompt is defined in `load_test.py`. 
To use a different prompt, modify the `prompt` variable:\\n\\n```python\\nprompt = \\"\\"\\"Your custom prompt text here\\"\\"\\"\\n```\\n\\n### Testing with Different Providers\\n\\nThe tool supports various LLM providers. To use a different provider, set the `provider` field in `base_config`:\\n\\n```python\\nbase_config = {\\n \\"provider\\": \\"vllm\\", # Change to your desired provider\\n # Other configuration...\\n}\\n```\\n\\nSupported providers include:\\n- `openai`: OpenAI-compatible APIs\\n- `vllm`: vLLM server\\n- `fireworks`: Fireworks AI\\n- `together`: Together AI\\n- `triton-infer`: Triton Inference Server\\n- `triton-generate`: Triton Generate API\\n- `tgi`: Text Generation Inference\\n\\n## Common Issues and Solutions\\n\\n### Connection Errors\\n\\nIf you encounter connection errors:\\n- Check that your LLM server is running and accessible\\n- Verify that your API key is correct\\n- Check for any network restrictions\\n\\n### Timeout Errors\\n\\nIf requests are timing out:\\n- Increase the timeout settings\\n- Reduce the concurrency level\\n- Check server resource utilization\\n\\n### Missing Data in Visualizations\\n\\nIf visualizations are missing data:\\n- Check that the CSV files contain the expected columns\\n- Ensure the test completed successfully\\n- Try running with fewer concurrent users or shorter output lengths\\n\\n## Next Steps\\n\\nFor more detailed information:\\n\\n- [Full Documentation](docs/README.md): Comprehensive documentation of all features\\n- [Visualization Guide](docs/VISUALIZATION_GUIDE.md): Detailed guide to the visualization tools\\n- [Developer Guide](docs/DEVELOPER_GUIDE.md): Guide for developers who want to extend the tool\\n\\n## Getting Help\\n\\nIf you encounter any issues or have questions:\\n\\n- Check the [Troubleshooting](docs/README.md#troubleshooting) section in the documentation\\n- Open an issue on the GitHub repository\\n- Contact the maintainers"}', + name: "create_textdoc", }, - ], - tool_call_id: "toolu_01XAkFWtmSdgwToJLQQaq2nZ", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\n\n\nLet's create a knowledge entry to document what we've done:", - ftm_tool_calls: [ - { - id: "toolu_01QS5KdB2vJApy8dgHkzjmyA", - function: { - arguments: - '{"im_going_to_use_tools": "documentation, markdown, technical writing", "im_going_to_apply_to": "benchmark1_0701, LLM Load Testing Tool, documentation, visualization tools, developer guide", "language_slash_framework": "python/locust", "search_key": "Creating comprehensive documentation for LLM load testing tool with Locust, including user guides, visualization documentation, and developer documentation", "knowledge_entry": "I created a comprehensive documentation suite for an LLM Load Testing Tool built with Python and Locust. The documentation was structured into several key components:\\n\\n1. Main Documentation (README.md):\\n - Provided a complete overview of the tool\'s functionality\\n - Included installation instructions, configuration options, and usage examples\\n - Documented all available metrics and how to interpret them\\n - Covered troubleshooting and advanced usage scenarios\\n\\n2. Visualization Guide (VISUALIZATION_GUIDE.md):\\n - Detailed explanation of all visualization tools (visualize_results.py, compare_runs.py, dashboard_generator.py)\\n - Instructions for generating and interpreting different types of visualizations\\n - Best practices for effective visualization and analysis\\n - Troubleshooting for visualization-specific issues\\n\\n3. 
Developer Guide (DEVELOPER_GUIDE.md):\\n - Technical architecture overview of the codebase\\n - Instructions for extending the tool with new providers, metrics, and visualizations\\n - Code style guidelines and testing recommendations\\n - Contributing guidelines for potential contributors\\n\\n4. Quick Start Guide (QUICK_START.md):\\n - Streamlined instructions for getting started quickly\\n - Basic configuration and test execution steps\\n - Common issues and solutions\\n - Pointers to more detailed documentation\\n\\n5. Documentation Index (index.md):\\n - Central navigation hub for all documentation\\n - Brief overview of the tool and its capabilities\\n - Links to all documentation sections\\n\\nThe documentation was designed to be comprehensive yet accessible, with clear navigation between different sections. It covers both user-focused aspects (how to run tests and interpret results) and developer-focused aspects (how to extend and contribute to the codebase).\\n\\nKey insights from analyzing the codebase:\\n- The tool uses Locust for load generation and metrics collection\\n- It supports multiple LLM providers through a provider class hierarchy\\n- Visualization is handled by matplotlib and seaborn\\n- The architecture separates concerns between test execution, data collection, and visualization\\n- The tool generates both CSV and JSON reports for further analysis\\n\\nThis documentation structure provides a solid foundation for users to understand and effectively use the LLM Load Testing Tool, while also enabling developers to extend and improve it."}', - name: "create_knowledge", - }, - type: "function", - index: 0, + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: "/home/fupfv/git/benchmark1_0701/docs/QUICK_START.md", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + '# LLM Load Testing Tool - Quick Start Guide\n\nThis guide will help you quickly set up and run your first load test with the LLM Load Testing Tool.\n\n## Prerequisites\n\n- Python 3.8 or higher\n- pip (Python package manager)\n- Access to an LLM inference server (or API key for a hosted service)\n\n## Installation\n\n1. Clone the repository (or download and extract the ZIP file):\n\n```bash\ngit clone \ncd llm-load-test\n```\n\n2. Install the required dependencies:\n\n```bash\npip install -r requirements.txt\n```\n\n3. 
Make the scripts executable (optional, for Linux/macOS):\n\n```bash\nbash make_scripts_executable.sh\n```\n\n## Running Your First Test\n\n### Step 1: Configure the Test\n\nOpen `src/llm_load_test_runner.py` and modify the `base_config` dictionary with your LLM server details:\n\n```python\nbase_config = {\n "host": "https://your-llm-server.com/", # URL of your LLM server\n "provider": "openai", # Provider type (openai, vllm, etc.)\n "model": "your-model-name", # Model name\n "api-key": "your-api-key", # API key (if required)\n "logprobs": 5, # Log probabilities (optional)\n "run-time": "60s", # Duration of each test\n "temperature": 1.0, # Temperature for generation\n}\n```\n\n### Step 2: Run the Test\n\nRun the load test with the default parameters:\n\n```bash\npython src/llm_load_test_runner.py\n```\n\nThis will run tests with:\n- Users: 1, 2, 50 (concurrency levels)\n- Output tokens: 15, 30\n- Run time: 60s per test\n- Prompt tokens: 4046\n- Temperature: 1.0\n\n### Step 3: View the Results\n\nAfter the tests complete, results will be saved in the `results/run_YYYYMMDD_HHMMSS/` directory:\n\n- Individual test results: `results_test_u{users}_o{tokens}.csv`\n- Summary reports: \n - `load_test_report_YYYYMMDD_HHMMSS.csv`\n - `load_test_report_YYYYMMDD_HHMMSS.json`\n\n### Step 4: Generate Visualizations\n\nGenerate visualizations from the test results:\n\n```bash\npython src/visualize_results.py --results-dir results/run_YYYYMMDD_HHMMSS\n```\n\nVisualizations will be saved in the `results/run_YYYYMMDD_HHMMSS/visualizations/` directory.\n\n## Customizing Your Tests\n\n### Changing Test Parameters\n\nTo customize the test parameters, modify these variables in `llm_load_test_runner.py`:\n\n```python\nconcurrent_users = [1, 2, 5, 10, 20, 50] # Concurrency levels to test\nmax_tokens = [1, 15, 30, 50] # Output token counts to test\nbase_config["run-time"] = "60s" # Duration of each test\n```\n\n### Using a Different Prompt\n\nThe default prompt is defined in `load_test.py`. To use a different prompt, modify the `prompt` variable:\n\n```python\nprompt = """Your custom prompt text here"""\n```\n\n### Testing with Different Providers\n\nThe tool supports various LLM providers. 
To use a different provider, set the `provider` field in `base_config`:\n\n```python\nbase_config = {\n "provider": "vllm", # Change to your desired provider\n # Other configuration...\n}\n```\n\nSupported providers include:\n- `openai`: OpenAI-compatible APIs\n- `vllm`: vLLM server\n- `fireworks`: Fireworks AI\n- `together`: Together AI\n- `triton-infer`: Triton Inference Server\n- `triton-generate`: Triton Generate API\n- `tgi`: Text Generation Inference\n\n## Common Issues and Solutions\n\n### Connection Errors\n\nIf you encounter connection errors:\n- Check that your LLM server is running and accessible\n- Verify that your API key is correct\n- Check for any network restrictions\n\n### Timeout Errors\n\nIf requests are timing out:\n- Increase the timeout settings\n- Reduce the concurrency level\n- Check server resource utilization\n\n### Missing Data in Visualizations\n\nIf visualizations are missing data:\n- Check that the CSV files contain the expected columns\n- Ensure the test completed successfully\n- Try running with fewer concurrent users or shorter output lengths\n\n## Next Steps\n\nFor more detailed information:\n\n- [Full Documentation](docs/README.md): Comprehensive documentation of all features\n- [Visualization Guide](docs/VISUALIZATION_GUIDE.md): Detailed guide to the visualization tools\n- [Developer Guide](docs/DEVELOPER_GUIDE.md): Guide for developers who want to extend the tool\n\n## Getting Help\n\nIf you encounter any issues or have questions:\n\n- Check the [Troubleshooting](docs/README.md#troubleshooting) section in the documentation\n- Open an issue on the GitHub repository\n- Contact the maintainers\n', + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01EpCs5Tzt8XiiQnKzmo7TnN", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\n\n\nLet's create a documentation index file to tie everything together:", + ftm_tool_calls: [ + { + id: "toolu_01XAkFWtmSdgwToJLQQaq2nZ", + function: { + arguments: + '{"path": "/home/fupfv/git/benchmark1_0701/docs/index.md", "content": "# LLM Load Testing Tool Documentation\\n\\nWelcome to the documentation for the LLM Load Testing Tool. This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\\n\\n## Documentation Sections\\n\\n- [Quick Start Guide](QUICK_START.md): Get up and running quickly\\n- [Comprehensive Documentation](README.md): Detailed information about all features\\n- [Visualization Guide](VISUALIZATION_GUIDE.md): In-depth guide to the visualization tools\\n- [Developer Guide](DEVELOPER_GUIDE.md): Technical information for developers\\n\\n## About the Tool\\n\\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\\n\\nKey features include:\\n- Testing with different concurrency levels and output token counts\\n- Comprehensive metrics collection (latency, throughput, token generation speed)\\n- Detailed visualization of test results\\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\\n- Comparison of different test runs and models\\n\\n## Getting Started\\n\\nIf you\'re new to the tool, we recommend starting with the [Quick Start Guide](QUICK_START.md) to get up and running quickly. 
Once you\'re familiar with the basics, you can explore the more detailed documentation to learn about advanced features and customization options.\\n\\n## Support\\n\\nIf you encounter any issues or have questions about the tool, please:\\n- Check the [Troubleshooting](README.md#troubleshooting) section in the documentation\\n- Open an issue on the GitHub repository\\n- Contact the maintainers\\n\\n## Contributing\\n\\nContributions to the LLM Load Testing Tool are welcome! Please see the [Contributing Guidelines](DEVELOPER_GUIDE.md#contributing-guidelines) for more information on how to contribute.\\n\\n## License\\n\\nThis project is licensed under the terms of the LICENSE file included in the repository."}', + name: "create_textdoc", }, - ], - finish_reason: "stop", - }, - { - ftm_role: "tool", - ftm_call_id: "toolu_01QS5KdB2vJApy8dgHkzjmyA", - ftm_content: - "Knowledge entry created successfully with ID: 594ad5ff35\nTools: documentation, markdown, technical writing\nApply to: benchmark1_0701, LLM Load Testing Tool, documentation, visualization tools, developer guide\nSearch Key: Creating comprehensive documentation for LLM load testing tool with Locust, including user guides, visualization documentation, and developer documentation\nLanguage/Framework: python/locust\nEntry: I created a comprehensive documentation suite for an LLM Load Testing Tool built with Python and Locust. The documentation was structured into several key components:\n\n1. Main Documentation (README.md):\n - Provided a complete overview of the tool's functionality\n - Included installation instructions, configuration options, and usage examples\n - Documented all available metrics and how to interpret them\n - Covered troubleshooting and advanced usage scenarios\n\n2. Visualization Guide (VISUALIZATION_GUIDE.md):\n - Detailed explanation of all visualization tools (visualize_results.py, compare_runs.py, dashboard_generator.py)\n - Instructions for generating and interpreting different types of visualizations\n - Best practices for effective visualization and analysis\n - Troubleshooting for visualization-specific issues\n\n3. Developer Guide (DEVELOPER_GUIDE.md):\n - Technical architecture overview of the codebase\n - Instructions for extending the tool with new providers, metrics, and visualizations\n - Code style guidelines and testing recommendations\n - Contributing guidelines for potential contributors\n\n4. Quick Start Guide (QUICK_START.md):\n - Streamlined instructions for getting started quickly\n - Basic configuration and test execution steps\n - Common issues and solutions\n - Pointers to more detailed documentation\n\n5. Documentation Index (index.md):\n - Central navigation hub for all documentation\n - Brief overview of the tool and its capabilities\n - Links to all documentation sections\n\nThe documentation was designed to be comprehensive yet accessible, with clear navigation between different sections. 
It covers both user-focused aspects (how to run tests and interpret results) and developer-focused aspects (how to extend and contribute to the codebase).\n\nKey insights from analyzing the codebase:\n- The tool uses Locust for load generation and metrics collection\n- It supports multiple LLM providers through a provider class hierarchy\n- Visualization is handled by matplotlib and seaborn\n- The architecture separates concerns between test execution, data collection, and visualization\n- The tool generates both CSV and JSON reports for further analysis\n\nThis documentation structure provides a solid foundation for users to understand and effectively use the LLM Load Testing Tool, while also enabling developers to extend and improve it.", - }, - { - ftm_role: "assistant", - ftm_content: - "\n\nI've created comprehensive documentation for the LLM Load Testing Tool. The documentation is organized into several key components:\n\n1. **Main Documentation (README.md)**\n - Complete overview of the tool's functionality\n - Installation instructions and configuration options\n - Detailed explanation of metrics and reports\n - Advanced usage scenarios and troubleshooting\n\n2. **Visualization Guide (VISUALIZATION_GUIDE.md)**\n - Detailed explanation of all visualization tools\n - Instructions for generating and interpreting visualizations\n - Best practices for effective analysis\n - Troubleshooting for visualization issues\n\n3. **Developer Guide (DEVELOPER_GUIDE.md)**\n - Technical architecture overview\n - Instructions for extending the tool with new providers, metrics, and visualizations\n - Code style guidelines and testing recommendations\n - Contributing guidelines\n\n4. **Quick Start Guide (QUICK_START.md)**\n - Streamlined instructions for getting started quickly\n - Basic configuration and test execution steps\n - Common issues and solutions\n - Pointers to more detailed documentation\n\n5. **Documentation Index (index.md)**\n - Central navigation hub for all documentation\n - Brief overview of the tool and its capabilities\n - Links to all documentation sections\n\nAll documentation files have been created in the `/home/fupfv/git/benchmark1_0701/docs/` directory. The documentation is written in Markdown format, making it easy to read both in a text editor and when rendered as HTML.\n\nThe documentation covers all aspects of the tool, from basic usage to advanced customization and development. It's designed to be accessible to users with different levels of expertise, from those who just want to run a quick benchmark to developers who want to extend the tool with new features.\n\nWould you like me to explain any specific part of the documentation in more detail or make any adjustments to what I've created?", - finish_reason: "stop", - }, - { - ftm_role: "user", - ftm_content: "Explain metrics?", - checkpoints: [ - { - workspace_folder: "/home/fupfv/git/benchmark1_0701", - commit_hash: "4346991fb86d19a726db7bbd0d6f2d990cfc22fd", + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "diff", + ftm_content: [ + { + file_name: "/home/fupfv/git/benchmark1_0701/docs/index.md", + file_action: "edit", + line1: 1, + line2: 1, + lines_remove: "", + lines_add: + "# LLM Load Testing Tool Documentation\n\nWelcome to the documentation for the LLM Load Testing Tool. 
This tool allows you to benchmark and analyze the performance of Large Language Model (LLM) inference servers under various load conditions.\n\n## Documentation Sections\n\n- [Quick Start Guide](QUICK_START.md): Get up and running quickly\n- [Comprehensive Documentation](README.md): Detailed information about all features\n- [Visualization Guide](VISUALIZATION_GUIDE.md): In-depth guide to the visualization tools\n- [Developer Guide](DEVELOPER_GUIDE.md): Technical information for developers\n\n## About the Tool\n\nThe LLM Load Testing Tool is designed to help you evaluate the performance of LLM inference servers under various load conditions. It uses Locust, a popular load testing framework, to simulate concurrent users sending requests to your LLM server.\n\nKey features include:\n- Testing with different concurrency levels and output token counts\n- Comprehensive metrics collection (latency, throughput, token generation speed)\n- Detailed visualization of test results\n- Support for multiple LLM providers (OpenAI, vLLM, Fireworks, etc.)\n- Comparison of different test runs and models\n\n## Getting Started\n\nIf you're new to the tool, we recommend starting with the [Quick Start Guide](QUICK_START.md) to get up and running quickly. Once you're familiar with the basics, you can explore the more detailed documentation to learn about advanced features and customization options.\n\n## Support\n\nIf you encounter any issues or have questions about the tool, please:\n- Check the [Troubleshooting](README.md#troubleshooting) section in the documentation\n- Open an issue on the GitHub repository\n- Contact the maintainers\n\n## Contributing\n\nContributions to the LLM Load Testing Tool are welcome! Please see the [Contributing Guidelines](DEVELOPER_GUIDE.md#contributing-guidelines) for more information on how to contribute.\n\n## License\n\nThis project is licensed under the terms of the LICENSE file included in the repository.\n", + file_name_rename: null, + application_details: "", + }, + ], + tool_call_id: "toolu_01XAkFWtmSdgwToJLQQaq2nZ", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\n\n\nLet's create a knowledge entry to document what we've done:", + ftm_tool_calls: [ + { + id: "toolu_01QS5KdB2vJApy8dgHkzjmyA", + function: { + arguments: + '{"im_going_to_use_tools": "documentation, markdown, technical writing", "im_going_to_apply_to": "benchmark1_0701, LLM Load Testing Tool, documentation, visualization tools, developer guide", "language_slash_framework": "python/locust", "search_key": "Creating comprehensive documentation for LLM load testing tool with Locust, including user guides, visualization documentation, and developer documentation", "knowledge_entry": "I created a comprehensive documentation suite for an LLM Load Testing Tool built with Python and Locust. The documentation was structured into several key components:\\n\\n1. Main Documentation (README.md):\\n - Provided a complete overview of the tool\'s functionality\\n - Included installation instructions, configuration options, and usage examples\\n - Documented all available metrics and how to interpret them\\n - Covered troubleshooting and advanced usage scenarios\\n\\n2. 
Visualization Guide (VISUALIZATION_GUIDE.md):\\n - Detailed explanation of all visualization tools (visualize_results.py, compare_runs.py, dashboard_generator.py)\\n - Instructions for generating and interpreting different types of visualizations\\n - Best practices for effective visualization and analysis\\n - Troubleshooting for visualization-specific issues\\n\\n3. Developer Guide (DEVELOPER_GUIDE.md):\\n - Technical architecture overview of the codebase\\n - Instructions for extending the tool with new providers, metrics, and visualizations\\n - Code style guidelines and testing recommendations\\n - Contributing guidelines for potential contributors\\n\\n4. Quick Start Guide (QUICK_START.md):\\n - Streamlined instructions for getting started quickly\\n - Basic configuration and test execution steps\\n - Common issues and solutions\\n - Pointers to more detailed documentation\\n\\n5. Documentation Index (index.md):\\n - Central navigation hub for all documentation\\n - Brief overview of the tool and its capabilities\\n - Links to all documentation sections\\n\\nThe documentation was designed to be comprehensive yet accessible, with clear navigation between different sections. It covers both user-focused aspects (how to run tests and interpret results) and developer-focused aspects (how to extend and contribute to the codebase).\\n\\nKey insights from analyzing the codebase:\\n- The tool uses Locust for load generation and metrics collection\\n- It supports multiple LLM providers through a provider class hierarchy\\n- Visualization is handled by matplotlib and seaborn\\n- The architecture separates concerns between test execution, data collection, and visualization\\n- The tool generates both CSV and JSON reports for further analysis\\n\\nThis documentation structure provides a solid foundation for users to understand and effectively use the LLM Load Testing Tool, while also enabling developers to extend and improve it."}', + name: "create_knowledge", }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "I'd be happy to explain the metrics used in the LLM Load Testing Tool. These metrics are crucial for understanding the performance of your LLM inference server under various load conditions.\n\n## Key Metrics in the LLM Load Testing Tool\n\n### Latency Metrics\n\n1. **Total Latency**\n - **What it measures**: The total time from sending a request to receiving the complete response, measured in milliseconds (ms).\n - **Why it matters**: This is the end-to-end time that a user would experience when interacting with your LLM server.\n - **Interpretation**: Lower values are better. Look for how this increases with concurrency to understand scaling behavior.\n\n2. **Time To First Token (TTFT)**\n - **What it measures**: The time it takes to receive the first token of the response after sending the request, measured in milliseconds (ms).\n - **Why it matters**: This represents the initial latency before any content is displayed to the user, which affects perceived responsiveness.\n - **Interpretation**: Lower values indicate a more responsive system. This metric is particularly important for streaming implementations.\n\n3. **Latency Per Token**\n - **What it measures**: The average time taken to generate each token after the first token, measured in milliseconds per token (ms/token).\n - **Why it matters**: This shows how efficiently the model generates content once it starts responding.\n - **Interpretation**: Lower values indicate faster token generation. 
This metric helps identify if the model slows down during generation.\n\n### Throughput Metrics\n\n4. **Queries Per Second (QPS)**\n - **What it measures**: The number of complete requests the system can handle per second.\n - **Why it matters**: This indicates the overall throughput capacity of your LLM server.\n - **Interpretation**: Higher values mean your server can handle more simultaneous users. Look for the point where this plateaus to identify maximum capacity.\n\n5. **Num Requests**\n - **What it measures**: The total number of requests processed during the test.\n - **Why it matters**: This helps verify that the test ran as expected and processed the anticipated volume.\n - **Interpretation**: Should match your expectations based on test duration and concurrency.\n\n### Token Metrics\n\n6. **Num Tokens**\n - **What it measures**: The number of tokens generated in the response.\n - **Why it matters**: This helps verify that the model is generating the expected amount of content.\n - **Interpretation**: Should match your configured `max_tokens` parameter. Discrepancies might indicate early stopping or other issues.\n\n7. **Prompt Tokens**\n - **What it measures**: The number of tokens in the input prompt.\n - **Why it matters**: Larger prompts consume more context window and can affect performance.\n - **Interpretation**: Used primarily for verification and to understand the total context usage.\n\n8. **Token Generation Speed**\n - **What it measures**: The number of tokens generated per second, calculated as `Num Tokens / (Total Latency / 1000)`.\n - **Why it matters**: This is a key performance indicator that combines latency and output size.\n - **Interpretation**: Higher values indicate more efficient token generation. This metric often decreases with higher concurrency.\n\n### Success Metrics\n\n9. **Success Rate**\n - **What it measures**: The percentage of requests that completed successfully without errors.\n - **Why it matters**: This indicates the reliability of your LLM server under load.\n - **Interpretation**: Should be close to 100% for a stable system. Lower values indicate potential stability issues.\n\n## How These Metrics Are Collected\n\nThe metrics are collected through several mechanisms:\n\n1. **Locust Metrics**: Basic request/response metrics are collected by Locust during the load test.\n\n2. **Custom Metrics**: The tool adds custom metrics using the `add_custom_metric` function in `load_test.py`.\n\n3. **Response Parsing**: Token counts and other response-specific metrics are extracted by parsing the LLM server's response.\n\n## Visualizing and Analyzing Metrics\n\nThe visualization tools provide several ways to analyze these metrics:\n\n1. **By Concurrency Level**: See how metrics change as the number of concurrent users increases.\n\n2. **By Output Token Count**: Understand the relationship between response size and performance.\n\n3. **Distribution Analysis**: Examine the distribution of latency values to identify outliers and variability.\n\n4. **Heatmaps**: Visualize performance across different combinations of concurrency and output tokens.\n\n5. **Comparison Across Runs**: Compare metrics between different test runs to identify improvements or regressions.\n\n## Using Metrics for Optimization\n\nThese metrics can help you optimize your LLM deployment:\n\n1. **Identifying Bottlenecks**: Look for metrics that degrade significantly at certain concurrency levels.\n\n2. 
**Scaling Decisions**: Use throughput metrics to determine when to scale horizontally.\n\n3. **Model Selection**: Compare metrics across different models to select the best one for your use case.\n\n4. **Parameter Tuning**: Experiment with different batch sizes, quantization levels, or other parameters and observe the impact on metrics.\n\n5. **Resource Allocation**: Determine the optimal CPU/GPU resources needed based on performance under different loads.\n\nWould you like me to elaborate on any specific metric or aspect of the metrics collection and analysis?", - finish_reason: "stop", - }, - ], - title: "Documentation Generation", - model: "claude-3-7-sonnet", - last_user_message_id: "2296e477-8bd6-45c5-872e-d86cae736ea7", - tool_use: "agent", - mode: "AGENT", - new_chat_suggested: { - wasSuggested: true, - }, - currentMaximumContextTokens: 64000, - currentMessageContextTokens: 1, - read: true, - isTitleGenerated: true, - createdAt: "2025-03-04T06:49:31.563Z", - updatedAt: "2025-03-04T06:49:31.563Z", -}; + type: "function", + index: 0, + }, + ], + finish_reason: "stop", + }, + { + ftm_role: "tool", + ftm_call_id: "toolu_01QS5KdB2vJApy8dgHkzjmyA", + ftm_content: + "Knowledge entry created successfully with ID: 594ad5ff35\nTools: documentation, markdown, technical writing\nApply to: benchmark1_0701, LLM Load Testing Tool, documentation, visualization tools, developer guide\nSearch Key: Creating comprehensive documentation for LLM load testing tool with Locust, including user guides, visualization documentation, and developer documentation\nLanguage/Framework: python/locust\nEntry: I created a comprehensive documentation suite for an LLM Load Testing Tool built with Python and Locust. The documentation was structured into several key components:\n\n1. Main Documentation (README.md):\n - Provided a complete overview of the tool's functionality\n - Included installation instructions, configuration options, and usage examples\n - Documented all available metrics and how to interpret them\n - Covered troubleshooting and advanced usage scenarios\n\n2. Visualization Guide (VISUALIZATION_GUIDE.md):\n - Detailed explanation of all visualization tools (visualize_results.py, compare_runs.py, dashboard_generator.py)\n - Instructions for generating and interpreting different types of visualizations\n - Best practices for effective visualization and analysis\n - Troubleshooting for visualization-specific issues\n\n3. Developer Guide (DEVELOPER_GUIDE.md):\n - Technical architecture overview of the codebase\n - Instructions for extending the tool with new providers, metrics, and visualizations\n - Code style guidelines and testing recommendations\n - Contributing guidelines for potential contributors\n\n4. Quick Start Guide (QUICK_START.md):\n - Streamlined instructions for getting started quickly\n - Basic configuration and test execution steps\n - Common issues and solutions\n - Pointers to more detailed documentation\n\n5. Documentation Index (index.md):\n - Central navigation hub for all documentation\n - Brief overview of the tool and its capabilities\n - Links to all documentation sections\n\nThe documentation was designed to be comprehensive yet accessible, with clear navigation between different sections. 
It covers both user-focused aspects (how to run tests and interpret results) and developer-focused aspects (how to extend and contribute to the codebase).\n\nKey insights from analyzing the codebase:\n- The tool uses Locust for load generation and metrics collection\n- It supports multiple LLM providers through a provider class hierarchy\n- Visualization is handled by matplotlib and seaborn\n- The architecture separates concerns between test execution, data collection, and visualization\n- The tool generates both CSV and JSON reports for further analysis\n\nThis documentation structure provides a solid foundation for users to understand and effectively use the LLM Load Testing Tool, while also enabling developers to extend and improve it.", + }, + { + ftm_role: "assistant", + ftm_content: + "\n\nI've created comprehensive documentation for the LLM Load Testing Tool. The documentation is organized into several key components:\n\n1. **Main Documentation (README.md)**\n - Complete overview of the tool's functionality\n - Installation instructions and configuration options\n - Detailed explanation of metrics and reports\n - Advanced usage scenarios and troubleshooting\n\n2. **Visualization Guide (VISUALIZATION_GUIDE.md)**\n - Detailed explanation of all visualization tools\n - Instructions for generating and interpreting visualizations\n - Best practices for effective analysis\n - Troubleshooting for visualization issues\n\n3. **Developer Guide (DEVELOPER_GUIDE.md)**\n - Technical architecture overview\n - Instructions for extending the tool with new providers, metrics, and visualizations\n - Code style guidelines and testing recommendations\n - Contributing guidelines\n\n4. **Quick Start Guide (QUICK_START.md)**\n - Streamlined instructions for getting started quickly\n - Basic configuration and test execution steps\n - Common issues and solutions\n - Pointers to more detailed documentation\n\n5. **Documentation Index (index.md)**\n - Central navigation hub for all documentation\n - Brief overview of the tool and its capabilities\n - Links to all documentation sections\n\nAll documentation files have been created in the `/home/fupfv/git/benchmark1_0701/docs/` directory. The documentation is written in Markdown format, making it easy to read both in a text editor and when rendered as HTML.\n\nThe documentation covers all aspects of the tool, from basic usage to advanced customization and development. It's designed to be accessible to users with different levels of expertise, from those who just want to run a quick benchmark to developers who want to extend the tool with new features.\n\nWould you like me to explain any specific part of the documentation in more detail or make any adjustments to what I've created?", + finish_reason: "stop", + }, + { + ftm_role: "user", + ftm_content: "Explain metrics?", + checkpoints: [ + { + workspace_folder: "/home/fupfv/git/benchmark1_0701", + commit_hash: "4346991fb86d19a726db7bbd0d6f2d990cfc22fd", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "I'd be happy to explain the metrics used in the LLM Load Testing Tool. These metrics are crucial for understanding the performance of your LLM inference server under various load conditions.\n\n## Key Metrics in the LLM Load Testing Tool\n\n### Latency Metrics\n\n1. 
**Total Latency**\n - **What it measures**: The total time from sending a request to receiving the complete response, measured in milliseconds (ms).\n - **Why it matters**: This is the end-to-end time that a user would experience when interacting with your LLM server.\n - **Interpretation**: Lower values are better. Look for how this increases with concurrency to understand scaling behavior.\n\n2. **Time To First Token (TTFT)**\n - **What it measures**: The time it takes to receive the first token of the response after sending the request, measured in milliseconds (ms).\n - **Why it matters**: This represents the initial latency before any content is displayed to the user, which affects perceived responsiveness.\n - **Interpretation**: Lower values indicate a more responsive system. This metric is particularly important for streaming implementations.\n\n3. **Latency Per Token**\n - **What it measures**: The average time taken to generate each token after the first token, measured in milliseconds per token (ms/token).\n - **Why it matters**: This shows how efficiently the model generates content once it starts responding.\n - **Interpretation**: Lower values indicate faster token generation. This metric helps identify if the model slows down during generation.\n\n### Throughput Metrics\n\n4. **Queries Per Second (QPS)**\n - **What it measures**: The number of complete requests the system can handle per second.\n - **Why it matters**: This indicates the overall throughput capacity of your LLM server.\n - **Interpretation**: Higher values mean your server can handle more simultaneous users. Look for the point where this plateaus to identify maximum capacity.\n\n5. **Num Requests**\n - **What it measures**: The total number of requests processed during the test.\n - **Why it matters**: This helps verify that the test ran as expected and processed the anticipated volume.\n - **Interpretation**: Should match your expectations based on test duration and concurrency.\n\n### Token Metrics\n\n6. **Num Tokens**\n - **What it measures**: The number of tokens generated in the response.\n - **Why it matters**: This helps verify that the model is generating the expected amount of content.\n - **Interpretation**: Should match your configured `max_tokens` parameter. Discrepancies might indicate early stopping or other issues.\n\n7. **Prompt Tokens**\n - **What it measures**: The number of tokens in the input prompt.\n - **Why it matters**: Larger prompts consume more context window and can affect performance.\n - **Interpretation**: Used primarily for verification and to understand the total context usage.\n\n8. **Token Generation Speed**\n - **What it measures**: The number of tokens generated per second, calculated as `Num Tokens / (Total Latency / 1000)`.\n - **Why it matters**: This is a key performance indicator that combines latency and output size.\n - **Interpretation**: Higher values indicate more efficient token generation. This metric often decreases with higher concurrency.\n\n### Success Metrics\n\n9. **Success Rate**\n - **What it measures**: The percentage of requests that completed successfully without errors.\n - **Why it matters**: This indicates the reliability of your LLM server under load.\n - **Interpretation**: Should be close to 100% for a stable system. Lower values indicate potential stability issues.\n\n## How These Metrics Are Collected\n\nThe metrics are collected through several mechanisms:\n\n1. 
**Locust Metrics**: Basic request/response metrics are collected by Locust during the load test.\n\n2. **Custom Metrics**: The tool adds custom metrics using the `add_custom_metric` function in `load_test.py`.\n\n3. **Response Parsing**: Token counts and other response-specific metrics are extracted by parsing the LLM server's response.\n\n## Visualizing and Analyzing Metrics\n\nThe visualization tools provide several ways to analyze these metrics:\n\n1. **By Concurrency Level**: See how metrics change as the number of concurrent users increases.\n\n2. **By Output Token Count**: Understand the relationship between response size and performance.\n\n3. **Distribution Analysis**: Examine the distribution of latency values to identify outliers and variability.\n\n4. **Heatmaps**: Visualize performance across different combinations of concurrency and output tokens.\n\n5. **Comparison Across Runs**: Compare metrics between different test runs to identify improvements or regressions.\n\n## Using Metrics for Optimization\n\nThese metrics can help you optimize your LLM deployment:\n\n1. **Identifying Bottlenecks**: Look for metrics that degrade significantly at certain concurrency levels.\n\n2. **Scaling Decisions**: Use throughput metrics to determine when to scale horizontally.\n\n3. **Model Selection**: Compare metrics across different models to select the best one for your use case.\n\n4. **Parameter Tuning**: Experiment with different batch sizes, quantization levels, or other parameters and observe the impact on metrics.\n\n5. **Resource Allocation**: Determine the optimal CPU/GPU resources needed based on performance under different loads.\n\nWould you like me to elaborate on any specific metric or aspect of the metrics collection and analysis?", + finish_reason: "stop", + }, +].map((message, index) => { + return { + ftm_belongs_to_ft_id: "test", + ftm_num: index, + ftm_alt: 100, + ftm_prev_alt: 100, + ftm_created_ts: Date.now(), + ftm_call_id: "", + ...message, + }; +}); diff --git a/refact-agent/gui/src/__fixtures__/message_lists.ts b/refact-agent/gui/src/__fixtures__/message_lists.ts index 97831ffcb..4cac17a2a 100644 --- a/refact-agent/gui/src/__fixtures__/message_lists.ts +++ b/refact-agent/gui/src/__fixtures__/message_lists.ts @@ -1,6 +1,6 @@ -import { FTMMessage } from "../features/ThreadMessages/makeMessageTrie"; +import type { BaseMessage } from "../services/refact/types"; -export const STUB_ALICE_MESSAGES: FTMMessage[] = [ +export const STUB_ALICE_MESSAGES: BaseMessage[] = [ { ftm_belongs_to_ft_id: "solarthread1", ftm_alt: 100, @@ -44,7 +44,7 @@ export const STUB_ALICE_MESSAGES: FTMMessage[] = [ }, ]; -export const STUB_BRANCHED_MESSAGES: FTMMessage[] = [ +export const STUB_BRANCHED_MESSAGES: BaseMessage[] = [ { ftm_belongs_to_ft_id: "solarthread1", ftm_alt: 100, diff --git a/refact-agent/gui/src/__fixtures__/msw.ts b/refact-agent/gui/src/__fixtures__/msw.ts index 0ca6f593a..527109381 100644 --- a/refact-agent/gui/src/__fixtures__/msw.ts +++ b/refact-agent/gui/src/__fixtures__/msw.ts @@ -258,26 +258,28 @@ export const Experts = graphql.query(ExpertsForGroupDocument, () => { export const ModelsForExpert = graphql.query(ModelsForExpertDocument, () => { return HttpResponse.json({ data: { - expert_choice_consequences: [ - { - provm_name: "claude-3-7-sonnet-20250219", - }, - { - provm_name: "claude-sonnet-4-20250514", - }, - { - provm_name: "gpt-4.1", - }, - { - provm_name: "gpt-4.1-mini", - }, - { - provm_name: "nebius/Qwen/Qwen3-235B-A22B", - }, - { - provm_name: "o4-mini", - }, - ], + 
expert_choice_consequences: { + models: [ + { + provm_name: "claude-3-7-sonnet-20250219", + }, + { + provm_name: "claude-sonnet-4-20250514", + }, + { + provm_name: "gpt-4.1", + }, + { + provm_name: "gpt-4.1-mini", + }, + { + provm_name: "nebius/Qwen/Qwen3-235B-A22B", + }, + { + provm_name: "o4-mini", + }, + ], + }, }, }); }); diff --git a/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts b/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts index 55f558ff3..c0f228176 100644 --- a/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts +++ b/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts @@ -1,210 +1,207 @@ -import { ChatThread } from "../features/Chat"; +import type { BaseMessage } from "../services/refact/types"; -export const CHAT_WITH_MULTI_MODAL: ChatThread = { - id: "aa7cbc4d-e21f-446f-a4cd-c402f2593e8a", - new_chat_suggested: { - wasSuggested: false, - }, - messages: [ - { - ftm_role: "user", - ftm_content: "make a desktop and mobile screenshots of the index.html\n", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_leDATFRCQJRefjC45EVpS0TW", - function: { - arguments: '{"path":"/Users/kot/code_aprojects/huddle"}', - name: "tree", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_leDATFRCQJRefjC45EVpS0TW", - ftm_content: - "/\n Users/\n kot/\n code_aprojects/\n huddle/\n .gitignore\n README-template.md\n README.md\n index.html\n style-guide.md\n styles.css\n images/\n bg-desktop.svg\n bg-mobile.svg\n favicon-32x32.png\n illustration-mockups.svg\n logo.svg\n design/\n active-states.jpg\n desktop-design.jpg\n desktop-preview.jpg\n mobile-design.jpg", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_035coU8EfPMCt5kyzdjGP1Me", - function: { - arguments: - '{"commands":"open_tab desktop 1\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 1\\nscreenshot 1\\nopen_tab mobile 2\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 2\\nscreenshot 2"}', - name: "chrome", - }, - type: "function", - index: 0, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_035coU8EfPMCt5kyzdjGP1Me", - ftm_content: [ - { - m_type: "text", - m_content: - "Start new chrome process.\nNo opened tabs.\nopened a new tab: tab_id `1` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nopened a new tab: tab_id `2` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\n test tripple ticks \n```\nstuff\n```\n might escape", - }, - { - m_type: "image/jpeg", - m_content: - 
"/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE0c
jllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAKmoahFp8IeTJY8Kg6k1jWrKmrsyqVVFHPP4jvWfKCJF/u7c1wPF1HscjrSY3/hIr/+9F/37pfWqvcPbSD/AISK/wD70X/fuj61V7h7aQf8JFf/AN6L/v3R9aq9w9tIP+Ehv/70X/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXu
HtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSHJ4ivVcFhEw9NmKaxdRAq0kb+nalFqERZMq6/eQ9v/rV3Ua6qLzOqlVUi7W5sFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZyHiCVn1V0PSNVUD8M/1rycVJuozz6zvMy65zEKACgAoA1dC8Nax4lnkh0ixe6eJd0hBCqgPTJJAGaTdilFvY3v+FUeNf8AoDf+TMX/AMVRzIr2cuwf8Ko8a/8AQG/8mYv/AIqjmQezl2D/AIVR41/6A3/kzF/8VRzIPZy7B/wqjxr/ANAb/wAmYv8A4qjmQezl2D/hVHjX/oDf+TMX/wAVRzIPZy7B/wAKo8a/9Ab/AMmYv/iqOZB7OXYP+FUeNf8AoDf+TMX/AMVRzIPZy7B/wqjxr/0Bv/JmL/4qjmQezl2D/hVHjX/oDf8AkzF/8VRzIPZy7B/wqjxr/wBAb/yZi/8AiqOZB7OXYP8AhVHjX/oDf+TMX/xVHMg9nLsH/CqPGv8A0Bv/ACZi/wDiqOZB7OXYP+FUeNf+gN/5Mxf/ABVHMg9nLsVr/wCG3i7TbGa8utHkEEKl5GSVHKqOpwrE4pcyB05LocrVGYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADo43lkWONGeRyAqqMkn0A70AaX/AAjeu/8AQF1H/wABX/wpXRfJLsH/AAjeu/8AQF1H/wABX/woug5JdjNkikhlaKVGSRDtZWGCD6EdqZA2gAoAKANHQ5THq0QB4fKn8q2w8mqiNaTtJHZDpXsHoLYKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZxuu/8hib/gP/AKCK8fEfxWedV+NmdWJkFABQAUAe3fAb/kGa36+fF/6C1ZyOilsevVJqFABQAUAFABQAUAFABQAUAFABQAUAVdR/5Bd5/wBe8n/oBoB7Hx4Puj6Vscb3FoEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAFrTJPJ1S0kN41kFmU/alUkw8/fAHXHWk9io7np/9v2//AEVy9/8AANqzOn5h/b9v/wBFcvf/AADagPmeZatKJtXvJRfNfh5mP2t1Kmbn75B6ZrRbHNLcp0yQoAKALukf8he2/wB/+hrWh/ERpD4kdsOleyeitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUABoBnG67/yGJv+A/8AoIrx8R/FZ51X42Z1YmQUAFABQB2PgTx/ceCXvFWyS8t7raWjMmwqy5wQcHsemKlq5pCfKdr/AML6/wCpc/8AJ3/7ClyGntvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIqap8cri80y5tbXQ0t5po2jEr3O8JkYJxtGTzRyCdW62PJOgx6VZgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBNZ3T2V5BdRrG7wyCRVkQMpIOeQeo9qRSdnc7H/haOsf9A3Qv/Bev+NTyIv2j7B/wtHWP+gboX/gvX/GjkQe0fY4++u3v76e7lSJJJ5DIyxIEQE+gHQVSIbu7kFMkKACgC7pH/IXtv9/+hrWh/ERpD4kdsK9k9GOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA0CZy2sadfT6nLJDZXUkbbcOkDMDwOhArx8R/FZwVIvmZR/snUv+gbe/wDgM/8AhWFyOVif2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2
TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/MVthv40QPn/1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAKAC
gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK5UY2I9Z0X7e4mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/
79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3H/fCf/E0cqDnl3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU
/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOajrjrM0VswVVOC+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXY6KS4aRQy/L3+XFJpDTZcpFBQAUAFABQAhGaAM19A06SXzDBgk5IDED8qfPInkRcjjWJkRFCoowABgCkJE9BYHpQB5vdl7e5lik4dGINeeqFmfU0EpwUo7MptNk4HJPAFdMKJ08lj0jTYnt9NtopPvpGob64rZK2h8lXmp1ZSjs2SSl9/HmYx/CRj9aZixmX/wCmv5rTJDL/APTX81oAMv8A9NfzWgAy/wD01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgAzJ/0
1/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgA/e/9NfzWkMciyMeWlX64oAkEbAg+Yx9jigCWgoKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAEcsSTRNHIoZHBVlPQg9RQm07olpSVnseK+LPDE/h69LIrPYSN+6l67f9lvcfrX2WXY+OJhZ/Et1+p8bmGXyw87r4Xt/kc5ur07Hm2E3UWCwu6iwWJLeGa7uEgt42kmkO1EQZJNRUnGnFyk7JGkKUpyUYq7Z7R4Q8NL4f0w+bhryfDTMOg9FHsK+LzDGPFVNPhW3+Z9jl+CWGp6/E9/8jpa4T0QoAKACgCrNdGKQqFzj3oSJbI/tx/uD86fKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMWLebzkJK4wcUNWGncmpDCgAoAKACgAoAKACgAoAi/5aj6GkLqS0xhQBmajollqZDTIRIBgSIcH/69B0UMXVoaQenYgsPDWn2EomVXllH3WlOcfQVTkzSvmFetHlbsvI2qk4yNoo3OWUE0XFYT7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyHoioMKoA9qBjqACgAoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFABQBBcW8N3A8FxEksTjDI4yCKcZShJSi7NEThGcXGSujgtX+F9tOzS6Vdm2J58mUb0/A9R+te5h89nBWrRv5rc8WvksJO9J28mc7J8NfEKNhfskg/vCbH8xXorPMM1qmvkcDyXEJ6W+8u2Pwt1GVwb6+ggj7iIF2/XArGrn1NL93Ft+ehtSySbf7ySXpqd7oXhbTPD8Z+yRbpmGGnk5dvx7D2FeDisbWxL/AHj07dD28NgqOHXuLXv1NyuU6woAKACgAoAQgHsKADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAoGKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAYGseKrHR5fIbfNcAZMcf8P1PauzD4GrXXMtEeTjs3oYR8r1l2X6lfTPGun39wsEiPbyOcLvIKk+me1XXy6rSjzboxwme4fETUGnFvvt9509cB7hG88aNtZsGgVxv2mH++Pyoswug+0w/3x+VFmF0H2mH++Pyoswuh6SpJnY2cdaLBcfQMKACgAoAKACgAoAKACgAoAKACgDI8Uf8AIq6t/wBecv8A6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8iNpv0f/ANDavAxn8aRmdRXOAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBF/y1H0NIXUlpjEPAOKBM8Fu72Se7mlmYmV3Znz65r7ijSjGmlHZH5tX5qlSU5btkP2j3rXkMeQ9v0KeW50Kwnmz5jwIWz3OOtfEYmMYVpRjsmz9GwkpToQlLdpFuUrv5jVuOpYCsTpZHlf+eCf99CgQZX/nhH/30KADK/8APCP/AL6FADlk2Z2xIM+jigB3nt/cX/vsUDuHnt/cX/vsUBcPPb+4v/fYoC4ee39xf++xQFw89v7i/wDfYoC4ee39xf8AvsUBcPPb+4v/AH2KAuHnt/cX/vsUBcUTOekYP/AxQFxQ8hIzHgeu6gRLQUFAGR4o/wCRV1b/AK85f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/wDobV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAeceKfh/cXN7JfaO0eZWLSW7nbhj1Kn39K97A5vGnBU63TZ/wCZ8/jsndSbqUuu6/yM/RfhxqE10r6s0cFspyyRvud/bI4AroxWdU+W1DV/gjDDZJPmvW0R6pHGsaKiAKqjAA7CvmW23dn0qSSshjwl2yCv4pmgdhv2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dv70f/fsUBYPs7f3o/8Av2KAsH2dv70f/fsUBYPs7f3o/wDv2KAsH2dv70f/AH7FAWD7O396P/v2KAsH2dvWP/v2KAsPSAAfMEJ9lxQFh4RVOQoB9hQMdQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgCL/lqPoaQupLTGFAEM88VtH5k0qRoP4nYAUJN6IcYSk7RV2Mtry2ugTbzxSgddjhsflTcWt0OVKdPSaa9SzSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/ACKurf8AXnL/AOgmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQBi6j4n0vTmMck/mSjrHENxH17CtYUZz2R24fL8RXV4qy7vQxW+IFuG+XT5iPUyAVssHLud
…[remaining base64-encoded JPEG screenshot data omitted]…",
-      },
-      {
-        m_type: "image/jpeg",
-        m_content:
-          "…[base64-encoded JPEG screenshot data omitted]…",
-      },
-    ],
-  },
-  {
-    ftm_role: "assistant",
-    ftm_content:
-      "Here are the screenshots of the `index.html` page for both desktop and mobile views. If you need any further modifications or adjustments, please let me know!",
-  },
-  {
-    ftm_role: "user",
-    ftm_content:
-      "fantastic, now call two chrome() in parallel each running its own tab\n",
-  },
-  {
-    ftm_role: "assistant",
-    ftm_content:
-      "It seems that the tabs are already open. 
Let's close the existing tabs and then reopen them in parallel.\n\nI'll proceed with closing the tabs first and then reopening them.", - ftm_tool_calls: [ - { - id: "call_Z0bacXQ2J69R8l7SAavCp8IL", - function: { - arguments: - '{"commands": "open_tab desktop 3\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 3"}', - name: "chrome", - }, - type: "function", - index: 0, - }, - { - id: "call_NmC0xtr0Boz6buWVVjpuiDHO", - function: { - arguments: - '{"commands": "open_tab mobile 4\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 4"}', - name: "chrome", - }, - type: "function", - index: 1, - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_Z0bacXQ2J69R8l7SAavCp8IL", - ftm_content: [ - { - m_type: "text", - m_content: - "opened a new tab: tab_id `3` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `3` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", - }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_NmC0xtr0Boz6buWVVjpuiDHO", - ftm_content: [ - { - m_type: "text", - m_content: - "opened a new tab: tab_id `4` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `4` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`", - }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "The tabs have been successfully opened in parallel:\n\n- Desktop view in tab 3\n- Mobile view in tab 4\n\nIf you need any further actions or modifications, please let me know!", - }, - { - ftm_role: "user", - ftm_content: "you forgot the screenshots\n", - }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: "call_KSF9MxJi5wAUyE7jrVZ8keHq", - function: { - arguments: - '{"commands": "open_tab desktop 5\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 5\\nscreenshot 5"}', - name: "chrome", - }, - type: "function", - index: 0, +export const CHAT_WITH_MULTI_MODAL: BaseMessage[] = [ + { + ftm_role: "user", + ftm_content: "make a desktop and mobile screenshots of the index.html\n", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_leDATFRCQJRefjC45EVpS0TW", + function: { + arguments: '{"path":"/Users/kot/code_aprojects/huddle"}', + name: "tree", }, - { - id: "call_W1ae766eqQMvHBnmVvUoUtfw", - function: { - arguments: - '{"commands": "open_tab mobile 6\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 6\\nscreenshot 6"}', - name: "chrome", - }, - type: "function", - index: 1, + type: "function", + index: 0, + }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_leDATFRCQJRefjC45EVpS0TW", + ftm_content: + "/\n Users/\n kot/\n code_aprojects/\n huddle/\n .gitignore\n README-template.md\n README.md\n index.html\n style-guide.md\n styles.css\n images/\n bg-desktop.svg\n bg-mobile.svg\n favicon-32x32.png\n illustration-mockups.svg\n logo.svg\n design/\n active-states.jpg\n desktop-design.jpg\n desktop-preview.jpg\n mobile-design.jpg", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_035coU8EfPMCt5kyzdjGP1Me", + function: { + arguments: + '{"commands":"open_tab desktop 1\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 1\\nscreenshot 1\\nopen_tab mobile 2\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 2\\nscreenshot 2"}', + name: "chrome", }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_KSF9MxJi5wAUyE7jrVZ8keHq", - ftm_content: [ - { - m_type: "text", - m_content: - "opened a new tab: tab_id `5` device `desktop` uri 
`about:blank`\n\nnavigate_to successful: tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + type: "function", + index: 0, + }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_035coU8EfPMCt5kyzdjGP1Me", + ftm_content: [ + { + m_type: "text", + m_content: + "Start new chrome process.\nNo opened tabs.\nopened a new tab: tab_id `1` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nopened a new tab: tab_id `2` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\n test tripple ticks \n```\nstuff\n```\n might escape", + }, + { + m_type: "image/jpeg", + m_content: + "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYm
AReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE0cjllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAC
gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAKmoahFp8IeTJY8Kg6k1jWrKmrsyqVVFHPP4jvWfKCJF/u7c1wPF1HscjrSY3/hIr/+9F/37pfWqvcPbSD/AISK/wD70X/fuj61V7h7aQf8JFf/AN6L/v3R9aq9w9tIP+Ehv/70X/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSHJ4ivVcFhEw9NmKaxdRAq0kb+nalFqERZMq6/eQ9v/rV3Ua6qLzOqlVUi7W5sFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZyHiCVn1V0PSNVUD8M/1rycVJuozz6zvMy65zEKACgAoA1dC8Nax4lnkh0ixe6eJd0hBCqgPTJJAGaTdilFvY3v+FUeNf8AoDf+TMX/AMVRzIr2cuwf8Ko8a/8AQG/8mYv/AIqjmQezl2D/AIVR41/6A3/kzF/8VRzIPZy7B/wqjxr/ANAb/wAmYv8A4qjmQezl2D/hVHjX/oDf+TMX/wAVRzIPZy7B/wAKo8a/9Ab/AMmYv/iqOZB7OXYP+FUeNf8AoDf+TMX/AMVRzIPZy7B/wqjxr/0Bv/JmL/4qjmQezl2D/hVHjX/oDf8AkzF/8VRzIPZy7B/wqjxr/wBAb/yZi/8AiqOZB7OXYP8AhVHjX/oDf+TMX/xVHMg9nLsH/CqPGv8A0Bv/ACZi/wDiqOZB7OXYP+FUeNf+gN/5Mxf/ABVHMg9nLsVr/wCG3i7TbGa8utHkEEKl5GSVHKqOpwrE4pcyB05LocrVGYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADo43lkWONGeRyAqqMkn0A70AaX/AAjeu/8AQF1H/wABX/wpXRfJLsH/AAjeu/8AQF1H/wABX/woug5JdjNkikhlaKVGSRDtZWGCD6EdqZA2gAoAKANHQ5THq0QB4fKn8q2w8mqiNaTtJHZDpXsHoLYKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZxuu/8hib/gP/AKCK8fEfxWedV+NmdWJkFABQAUAe3fAb/kGa36+fF/6C1ZyOilsevVJqFABQAUAFABQAUAFABQAUAFABQAUAVdR/5Bd5/wBe8n/oBoB7Hx4Puj6Vscb3FoEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAFrTJPJ1S0kN41kFmU/alUkw8/fAHXHWk9io7np/9v2//AEVy9/8AANqzOn5h/b9v/wBFcvf/AADagPmeZatKJtXvJRfNfh5mP2t1Kmbn75B6ZrRbHNLcp0yQoAKALukf8he2/wB/+hrWh/ERpD4kdsOleyeitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUABoBnG67/yGJv+A/8AoIrx8R/FZ51X42Z1YmQUAFABQB2PgTx/ceCXvFWyS8t7raWjMmwqy5wQcHsemKlq5pCfKdr/AML6/wCpc/8AJ3/7ClyGntvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP
+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIqap8cri80y5tbXQ0t5po2jEr3O8JkYJxtGTzRyCdW62PJOgx6VZgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBNZ3T2V5BdRrG7wyCRVkQMpIOeQeo9qRSdnc7H/haOsf9A3Qv/Bev+NTyIv2j7B/wtHWP+gboX/gvX/GjkQe0fY4++u3v76e7lSJJJ5DIyxIEQE+gHQVSIbu7kFMkKACgC7pH/IXtv9/+hrWh/ERpD4kdsK9k9GOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA0CZy2sadfT6nLJDZXUkbbcOkDMDwOhArx8R/FZwVIvmZR/snUv+gbe/wDgM/8AhWFyOVif2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUA
FABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/MVthv40QPn/1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK5UY2I9Z0X7e4
mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3H/fCf/E0cqDn
l3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOajrjrM0VswVVO
C+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXY6KS4aRQy/L3+XFJpDTZcpFBQAUAFABQAhGaAM19A06SXzDBgk5IDED8qfPInkRcjjWJkRFCoowABgCkJE9BYHpQB5vdl7e5lik4dGINeeqFmfU0EpwUo7MptNk4HJPAFdMKJ08lj0jTYnt9NtopPvpGob64rZK2h8lXmp1ZSjs2SSl9/HmYx/CRj9aZixmX/wCmv5rTJDL/APTX81oAMv8A9NfzWgAy/wD01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgAzJ/01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgA/e/9NfzWkMciyMeWlX64oAkEbAg+Yx9jigCWgoKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAEcsSTRNHIoZHBVlPQg9RQm07olpSVnseK+LPDE/h69LIrPYSN+6l67f9lvcfrX2WXY+OJhZ/Et1+p8bmGXyw87r4Xt/kc5ur07Hm2E3UWCwu6iwWJLeGa7uEgt42kmkO1EQZJNRUnGnFyk7JGkKUpyUYq7Z7R4Q8NL4f0w+bhryfDTMOg9FHsK+LzDGPFVNPhW3+Z9jl+CWGp6/E9/8jpa4T0QoAKACgCrNdGKQqFzj3oSJbI/tx/uD86fKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMWLebzkJK4wcUNWGncmpDCgAoAKACgAoAKACgAoAi/5aj6GkLqS0xhQBmajollqZDTIRIBgSIcH/69B0UMXVoaQenYgsPDWn2EomVXllH3WlOcfQVTkzSvmFetHlbsvI2qk4yNoo3OWUE0XFYT7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyHoioMKoA9qBjqACgAoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFABQBBcW8N3A8FxEksTjDI4yCKcZShJSi7NEThGcXGSujgtX+F9tOzS6Vdm2J58mUb0/A9R+te5h89nBWrRv5rc8WvksJO9J28mc7J8NfEKNhfskg/vCbH8xXorPMM1qmvkcDyXEJ6W+8u2Pwt1GVwb6+ggj7iIF2/XArGrn1NL93Ft+ehtSySbf7ySXpqd7oXhbTPD8Z+yRbpmGGnk5dvx7D2FeDisbWxL/AHj07dD28NgqOHXuLXv1NyuU6woAKACgAoAQgHsKADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAoGKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAYGseKrHR5fIbfNcAZMcf8P1PauzD4GrXXMtEeTjs3oYR8r1l2X6lfTPGun39wsEiPbyOcLvIKk+me1XXy6rSjzboxwme4fETUGnFvvt9509cB7hG88aNtZsGgVxv2mH++Pyoswug+0w/3x+VFmF0H2mH++Pyoswuh6SpJnY2cdaLBcfQMKACgAoAKACgAoAKACgAoAKACgDI8Uf8AIq6t/wBecv8A6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8iNpv0f/ANDavAxn8aRmdRXOAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBF/y1H0NIXUlpjEPAOKBM8Fu72Se7mlmYmV3Znz65r7ijSjGmlHZH5tX5qlSU5btkP2j3rXkMeQ9v0KeW50Kwnmz5jwIWz3OOtfEYmMYVpRjsmz9GwkpToQlLdpFuUrv5jVuOpYCsTpZHlf+eCf99CgQZX/nhH/30KADK/8APCP/AL6FADlk2Z2xIM+jigB3nt/cX/vsUDuHnt/cX/vsUBcPPb+4v/fYoC4ee39xf++xQFw89v7i/wDfYoC4ee39xf8AvsUBcPPb+4v/AH2KAuHnt/cX/vsUBcUTOekYP/AxQFxQ8hIzHgeu6gRLQUFAGR4o/wCRV1b/AK85f/QTWlD+LH1QHzqK+j
LQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/wDobV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAeceKfh/cXN7JfaO0eZWLSW7nbhj1Kn39K97A5vGnBU63TZ/wCZ8/jsndSbqUuu6/yM/RfhxqE10r6s0cFspyyRvud/bI4AroxWdU+W1DV/gjDDZJPmvW0R6pHGsaKiAKqjAA7CvmW23dn0qSSshjwl2yCv4pmgdhv2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dv70f/fsUBYPs7f3o/8Av2KAsH2dv70f/fsUBYPs7f3o/wDv2KAsH2dv70f/AH7FAWD7O396P/v2KAsH2dvWP/v2KAsPSAAfMEJ9lxQFh4RVOQoB9hQMdQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgCL/lqPoaQupLTGFAEM88VtH5k0qRoP4nYAUJN6IcYSk7RV2Mtry2ugTbzxSgddjhsflTcWt0OVKdPSaa9SzSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/ACKurf8AXnL/AOgmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQBi6j4n0vTmMck/mSjrHENxH17CtYUZz2R24fL8RXV4qy7vQxW+IFuG+XT5iPUyAVssHLud6yOpbWaLNr4702YhZ45rcnuw3D9KUsHUW2pz1corwV42Z0ltdQ3cImt5UljPRkORXNKLi7M82cJQfLJWZPSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgTgUAeO63q82q6hLNIxKBiIkzwq9q9qhQUI2PsMJRhh6SjHfr6lK1vp7C6S5tpDHKhyCO/sfUV0uhGceWSFiFGpFxnqj2TTrsX2nW90BgTRq+PTIr56pHkm4dj5KpDkm49h06KZMmfZx0zUkMi8tf8An7/X/wCvT+RPzDy1/wCfv9f/AK9HyD5h5a/8/f6//Xo+QfMlhaOLOZw2fU0ikS+fF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4CWNjgOpP1osFySgYUAZHij/kVdW/685f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/APobV4GM/jSMzqK5wCgAoAKACgDz7xP4qkmkex0+QrCvyySqeXPcA+n867qGH+1I+jy7LIpKrWWvRf11OQJrtSPbbEzVpEuQ0mqSIbLumavd6Rcia1kwP40P3XHuKmpQjVVmcmJw9OvHlkj1XRtXg1mwW6g4P3XQ9Ub0rxatKVKXKz5evQlRnySNGszEKACgAoAoXE0izsquQB6U0iG9SL7RN/z0anZCuw+0S/32osguw+0S/wB9qLILsPtEv99qLILsPtEv99qLILsPtEv99qLILsPtEv8AfaiyC7LFpK7uwZiRjvSaKTLlIoKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgaAPHvEWi3GjX8gaNjbOxMUoHBHofQivocHWhVil1PoqGMVSC116lDTtOu9Xu1t7OJnYnlsfKg9Sa661WnQjzSZNbERgrtns1jaJY2MFqhysMaoD64FfKzk5zcn1PAnJyk5PqOmDb+N/TsgNSSyPD/wDTT/v2KZIYf/pp/wB+xQAYf/pp/wB+xQAYf/pp/wB+xSAMP/00/wC/YpgGH/6af9+xQAYf/pp/37FABh/+mn/fsUAGH/6af9+xQAYf/pp/37FABh/+mn/fsUAPSN2H3iv1QUhkiQkH5mDf8BAoHYeEUdAPyoGOoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAGB4u1I6doj+W2JZz5SHuM9T+VbYanzz16HdltBVq6vstTy3NeukfXNiZq0iGxpNUkQ5CZq0iGxpNUkQ5HReDNUax1xIGb9zdfu2Hbd/Cfz4/GuXH0eelzdUedmNJVKXN1R6rXhHgBQAUAFAEElrHI25s59jQKw37FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnRcLEkUCRElc5PrQ3cEiWgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAEZljV9hkUMexYZosAn/AC2H0NAupLQMKAGsqspDAEHqCKNgEjjSNdqKqj0AxQ23uF7j6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/APQ2rwMZ/GkZnUVzgFABQAUAcH8QpD5thH/Dh2/HgV6GAXxM93JVbnfocQTXpJHtOQ3NUkQ5CZq0iHIaTVJENiZq0iHIktpDFdwSLwVkUj8xSnG8GjKrrBo91FfKHzIUAFABQBWlu/KkKbM496EhNkf2/wD6Z/rT5Rcwfb/+mf60couYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8AT
P8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7eP+ef60co+YPt4/55/rRyhzB9vH/PP9aOUOYtRyebGHxjNJlD6ACgCjq9y9ppk0sf3wAAfTJxmnFXZMnZHCMxdizEljySeTXQc51fhu7kubdklYsYjtDHrjFYzVmbQdzeqDQKAOc1HXHWZorZgqqcF8ZJPtXmV8TNy5YaI9LD4JSipTKlt4inhlHnt5sWfmyOR9KqjXqJ+9qjoqZfCUfc0Z1cbrIiuhyrDIPtXonitNOzI5ZJlfCR7hjrQrCdxnnXH/PGiyFdh51x/wA8aLILsPOuP+eNFkF2SwvI+d6bcdKBoloGFABQAUAFABQAUAFABQAUAFABQBkeKP8AkVdW/wCvOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/8A6G1eBjP40jM6iucAoAKACgDiPiHbMbWzugOEdkb8Rkfyr0Mvl7zietlNS05Q7nAZr1kj23ITOKtIlsQmqSIchufWrSIbEziqSIci5pFq19rFnbIMl5lz9Acn9BWeIkqdKUn2MK9Tlg2e318meAFABQAUAV5Z4Ufa4yfpQkxNoZ9pt/7n/jtVZiug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdpWYXQ5J4HYKE5P+zSsx3RP5af3V/KgYeWn91fyoAcBgUAFABQBDc28d1bvBIMo4waE7O4mr6HLv4XuxLhJoimeGOQfyrX2iMvZs3dNsE06JYUO4nJZvU1nKVy4qxo0iwPSgDze7LwXEsUgw6MQQa89ULM+qo2nBSjsym8xJwOTXTCidKhY9I02J4NNtopPvrGob64rZK2h8jXkp1ZSjs2yWbdv4MmMfwkYpmLI8v6y/mtAgy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgBf3n/Tb81oAcqux5aVfrigB4jYEHzGPscUAS0FBQBkeKP+RV1b/rzl/wDQTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKOq6fHqmmz2cvCyLgH+6ex/A1dKo6c1NdDSjUdKamuh45e2k+n3clrcJtljOCPX3HtX0tOUakVKOzPpYVY1IqUdmVia1SByEzVpEOQhNUkQ2NzVpEtnoHgDQWjDavcLguu2AEdu7fj0FeFmuJUn7KPz/yPMxla/uI76vHOEKACgAoAryi33nzNu760K5LsMxaf7P5mnqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg9IbdxlVBHsaLsdkO+zQ/3B+dK7CyFW3iVgwQZFAWJaBhQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMzUdEs9Tw06ESAY8xDg//AF6Dow+Mq0NIPTsyCw8NafYTCZVeWUfdaU5x9BVOTNa+YV60eV6LyNqpOIjaKNzllBNFxWE+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsh6IqDCqAPagY6gAoAKACgDI8Uf8irq3/XnL/6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8AIjab9H/9DavAxn8aRmdRXOAUAFABQAUAYeveG7TXIR5n7q4Qfu5lHI9j6iunD4qdB6arsb4fEzovTbseb6n4Z1bS2JltWliHSWEblP5cj8a92hjaNXZ2fmetDF06nUxm4ODwfQ12qxo5E1rY3l9IEtbaWZv9hCf16Up1aVNXm7GU6kY7s7bw/wCAWDrc6xtwORbKc5/3j/QV4+LzVSXJR+//ACOGti76QO/VQqhVAAHAA7V4rdzhHUAFABQAUAV5LRZHLFiCaBWG/Yk/vtRzC5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlJoYVhUgEnPrQ3caViSgYUAFABQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMDWPFVjo8vkNvmuAMmOP+H6ntXZh8DVrrmWiPJx2b0MI+V6y7L9SvpnjXT7+4WCRHt5HOFLkFSfTParr5dVpR5t0Y4TPcPiJqDTi332+86euA9wjeeNG2s2DQK437TD/fH5UWYXQfaYf74/KizC6D7TD/AHx+VFmF0PSVJM7GzjrRYLj6BhQAUAFABQAUAFABQAUAFABQAUAZHij/AJFXVv8Arzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/AOhtXgYz+NIzOornAKACgAoAKACgAoAia3hkOXiRj6lQaalJdQuyRVCjAAA9AKQC0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8tR9DSF1JaYxDwDQJngt3eyT3c0szEyu7M+fXNfcUaUY00o7I/Nq/NUqSnLdsh+0e9a8hjyHt+hTy3OhWE83+seBCxPc4618RiYxhWnGOybP0bBzlOhCU
t2kW5iN/MStx1JArE6WR5X/ngn/fQpkhlf8Angn/AH0KADK/88E/76FADlk2Z2xKM+jikMd9ob/nmP8AvsUDuH2hv+eY/wC+xQFw+0N/zzH/AH2KAuH2hv8AnmP++xQFw+0N/wA8x/32KAuH2hv+eY/77FAXD7Q3/PMf99igLh9ob/nmP++xQFwEznpED/wMUBccryFgDFgeu4UCJaCgoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQB//9k=", + }, + { + m_type: "image/jpeg", + m_content: + "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAMfAXEDAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtbaa9uora3TfNK21FzjJqZSUVeWwGmulaZAM3uuwbv+ednE05/76+Vf1rL2s5fDH7wuGPDK8FtZk/2gsK/pk0/3++n4i1HJpel6gzRaZfXP2nazJBdQBfMwCSA6sRnAOMjmpdSpDWa09R6mZYWkmo39tZwlRJcSLGhY4GSeM1tOShHmA1fEfhS/8MNbi9kt3+0BinksT93Gc5A9RWNDExrX5VsCdyxpPgnU9Z0VtVtpbVYF3/LI5DHb16DFTUxcKdTkaFzHNqrOMqrH6DNdLaW4wAJOACT6CnsAFSpwwIPoRihNPYByRyOGKRuwX7xVSQPr6Urq9gO703wRp154BfXHnuRdCCWUKrDZlScDGPb1rz54uca6h0J5jga9H1KNnw5oy6r4jstOvBNDFcMckDa2ApPGR7VhXq8lNyjuJs3td8HWGm+M9J0iCa4Nve7d7OQWXLEHBx7Vz0sVOVGVR7oL3RV8d+GLLwzd2UdlJO6zxszeawOCCBxgD1q8JiJ1k+boCdzlEjklJEaO5HUKpOPyrrbS3GNp7gFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBseGONcVu6wXDD6iF6xxHwfNfmDG6Rfabaafex3tibiaWMCFsA7flIxk/d5Ktkc/LjvRVp1JSTg7ICx/aWieTpCf2Uxa3YG7PA80Y5Gc/Nk884x0qPZ1bytL0FqT6fPZzeLTd2MHk2sNvLIV2heVhbLbQSFy3bJxmpkpRo2m7u6/MZQ8K8eKtHH/T1F/OtcQv3UvQHsew+L/B48VtaE3ptvs2/pFv3bse4x0rxsPiXRvZXuRF2JtK0H/hHPCdxpwuPtG1Jn3lNv3gT0yaU6vtaqk0F7s574RAHQL7p/x8j/ANAWujML88fQctzjfAYB+IFkMf8ALSX/ANAau3F/wH8hvY6nxjoy658SdKsGJWOS2BlK8HYrMT+PGK5MNV9nh5S8xJ6Grrni/S/BUsOk2mmb8IGaOIhFRT07ck4rKjhqmITnJgk3qX5b2w1H4e313psQitpbSZhHtxtbB3AgdDnNZqMo11GQupzXw/0bTtO8OS+JdQjV3Ad0Zl3eWi8Egf3iQf0roxlaU6nsojbvoaWiePdM8Sa5b2c2nNBMGLWssjBvmwf++SRn1FZ1cHUpU3JMHGyKni3/AJKj4Z+if+htWmH/AN2mJbDPiLpzav4p8P6erbTcB0Lf3RuXJ/LNGDn7OlOQ47HSzw3Phuxt7Tw3oC3K/wAZMyxgfUnlmNcqaqycqkidzC8c+H4NS8MvrRsRZalAgkkTgkjPzKxHDeoNdGEruFXkvdFJ6nkNez6FBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBPZ3k9hdx3Vu4WWM5UkAjkYIIPUEEjFTOKnFxYHSvZ2cukWGt3lnDDbrHJ5kdsnli5l8whEGOnAJJHQD3rk5pKbpQd9vkIyhrNqvTw/pX4rKf8A2etfYy6zYDZ9dmktpYILKws0mXZIba32My5ztLEk44H1qlQjdNybGO8L/wDI16T/ANfcf/oVGJ/hS9BM7v4tXE8Emk+TNLHkS52OVz930rz8vipc10KJq+BpJJvh3M8jvI3+kfM7Env3NZ4pJYjTyFLcxPhNq1vCt3pUrqk0rLNECcb/AJcED34BrbMacnyzQ5G1pngjTvDXiJdYl1FvLMpS2hdQuHfgDP8AF1wOKwnip1afJYVzO8XaumhfEvSb+UHyUtQsuByEZmBP4dfwrTDUnUw8ore41saHiTwTbeL7qHV7DUkj8yNVZgnmI4HQjBGD2rOhi5UIuDQJ2NB7Cy0v4eX9jYTieGC1mRpAQdz4O7OO+c8VmpynXUpCW5z/AMP9SsdY8LTeGbyQJKFdFXOC8bc5X3BJ/SujGU5QqqrHYclqWdC+H1r4d1u3v73VFmKvttYynl7nIOM88nGeBU1sZOrBxSBy0IvFv/JUPDX
0T/0NqrD/AO7TEthnxD1I6R4r8PagF3fZw7lfUblBH5E0YODqUpw7jWxv6gl/4ktLa/8ADPiAW0ZXDrsDK314yrDpisIONJtVY3Fscl45TV9H0aCC58TPdvcZSe3ZFXcvqoAzt7HNdWE9nUqO0LDR5vXqFhQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHmWRoliMjmNSSqFjtBPUgUuVXcrasBlMAoAVWZGDKSGByCDgii1wHyzzT486aSTHTe5bH51MYxWysA5Lm4jj8uOeVEP8KyED8hTcYt3cQIgSpBUkEcgg4xT9QJp726uSpnup5Sn3TJKzbfpk8UlCC2QaEckskz75ZHkbGMuxJ/M0JJbKwEkN5dWyMkF1PEj/eWORlB+oBpShGTu4oBqzzJEYlmkWM9UDkKfw6UcqvdpARglSGUkEHIIOCKq1+gE817d3DI011PIyfcLysxX6ZPFSqcVeyDQY1xM8gkeaVpF6MzkkfQ0KEUrJaBYSWaWcgzSySEDALsWx+dCjGOyAdBdXFqxa3uJYSepjcrn8jRKEZboBkssk0hklkeSRurOxYn8TTSSVkAymAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQMKBBQAUAFABQAUAFABQAUAVZtSs7eQpLcxq46jOcflWMsRTi7XM5Vopkf9s6f/z9J+R/wqfrVIn6xEP7Z07/AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z07/AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB7eJJBqNncybIrhGb+70P61UK9OTsmVGrFvctVsahQIKACgAoAKACgAoAiunMdrM68MsbEfUCs6r5YOxFR2jc4Iknknk8k141+p5rYlIRreGdAn8T6/baRbzRwyT7j5kgJVQoJPT6UnoXGNz0T/hRGp/9B2y/wC/L1POaeyYf8KI1P8A6Dtl/wB+Xo5w9kw/4URqf/Qdsv8Avy9HOHsmH/CiNT/6Dtl/35ejnD2TD/hRGp/9B2y/78vRzh7Jh/wojU/+g7Zf9+Xo5w9kw/4URqf/AEHbL/vy9HOP2RheLfhbf+E9DbVZtStbmJZVjZI0ZWG7gHmnzXIlTaRwVUZBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBuab4bOpWSXI1XT4NxI8uYybhg452oR+tS2Wo3K+r6KdJWEm/tLrzCRi3L/Lj13KKaYONjLpkChipDKSGHII7GhNp3Q07HfwuZII3PVlBP4ivcg7xTPTg7xQ+qKCgAoAKACgAoAKAIL7/jxuP+uTfyNZV/gfoZ1fgZwdeMeaFAG14S8QHwt4ltdXFsLjyNwMW/buDKV64OOtJ7Fxdj0/8A4X1F/wBC5J/4GD/4ip5DT2q7B/wvqL/oXJP/AAMH/wARRyB7Vdg/4X1F/wBC5J/4GD/4ijkD2q7B/wAL6i/6FyT/AMDB/wDEUcge1XYP+F9Rf9C5J/4GD/4ijkD2q7B/wvqL/oXJP/Awf/EUcge1XYP+F9Rf9C5J/wCBg/8AiKOQPao53xp8VR4t8PNpKaObUPKkjSNcb/unOANopqNhSqXVjziqMQoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoGdLo/i59J02OzFvdOELHdHqc8I5OfuIcCpsaKSSKuv8AiJtdWBWhnj8ok/vb6W4zn0Dk4/ChIUpJmJVEAelAHfWv/HpD/wBc1/kK9un8CPSp/CiWrLCgAoAKACgAoAKALmlWMOp6vZ2FyGMFzMsMgVsHaxwcHtWOI/hy9CZq6sel/wDCjfBv/PK//wDAs/4V4HOzD2UQ/wCFG+Dv+eV//wCBZ/wo52Hsoh/wo3wd/wA8r/8A8Cz/AIUc7D2UQ/4Ub4O/55X/AP4Fn/CjnYeyiH/CjfB3/PK//wDAs/4Uc7D2UQ/4Ub4O/wCeV/8A+BZ/wo52Hsoh/wAKN8Hf88r/AP8AAs/4Uc7D2UQ/4Ub4O/55X/8A4Fn/AAo52Hsoh/wo3wd/zyv/APwLP+FHOw9lEP8AhRvg7/nlf/8AgWf8KOdh7KIf8KN8Hf8APK//APAs/wCFHOw9lEP+FG+Dv+eV/wD+BZ/wo52Hsoh/wo3wd/zyv/8AwLP+FHOw9lEP+FG+Dv8Anlf/APgWf8KOdh7KIf8ACjfB3/PK/wD/AALP+FHOw9lEP+FG+Dv+eV//AOBZ/wAKOdh7KIf8KN8Hf88r/wD8Cz/hRzsPZRD/AIUb4O/55X//AIFn/CjnYe
yiH/CjfB3/ADyv/wDwLP8AhRzsPZRD/hRvg7/nlf8A/gWf8KOdh7KIf8KN8Hf88r//AMCz/hRzsPZRD/hRvg7/AJ5X/wD4Fn/CjnYeyiH/AAo3wd/zyv8A/wACz/hRzsPZRD/hRvg7/nlf/wDgWf8ACjnYeyiH/CjfB3/PK/8A/As/4Uc7D2UQ/wCFG+Dv+eV//wCBZ/wo52Hsoh/wo3wd/wA8r/8A8Cz/AIUc7D2UQ/4Ub4O/55X/AP4Fn/CjnYeyiH/CjfB3/PK//wDAs/4Uc7D2UQ/4Ub4O/wCeV/8A+BZ/wo52Hsoh/wAKN8Hf88r/AP8AAs/4Uc7D2UQ/4Ub4O/55X/8A4Fn/AAo52Hsoh/wo3wd/zyv/APwLP+FHOw9lEP8AhRvg7/nlf/8AgWf8KOdh7KIf8KN8Hf8APK//APAs/wCFHOw9lEP+FG+Dv+eV/wD+BZ/wo52Hsoh/wo3wd/zyv/8AwLP+FHOw9lET/hRvg3/nlf8A/gWf8KOdh7KJ5he20dnf3NrDkRQSvEmTk7VYgZP0FfQ0nemvQ6IqysQVoMKACgAoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAh60gPmvV/+Q3qH/X1L/6Ga+ko/wAOPoWtinWgwoAKACgAoAKACgDV8M/8jVpP/X3F/wChCscR/Cn6ClsfRlfPEBQAUAGaAEzQAZoAWgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBD1pAfNer/8AIb1D/r6l/wDQzX0lH+HH0LWxTrQYUAFABQAUAFABQBq+Gf8AkatJ/wCvuL/0IVjiP4U/QUtj6Mr54gKACgDK1DV47RjFGA8o6+i/WuLEYtU3yxV2dVDCyqavYyX129zkOoHoFFcf1ys2d0cDSsWbTxH84W7UBT/Gvb6iuqji29Joxq5fZXpnQq6uoZTkHkEd67k7nmvR2FpgI7BFLNwBQBD9rh/vfpRYV0H2uH+9+lFgug+1w/3v0osF0H2uH+9+lOwXRKkiyDKnIpBcdQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPWkB816v/AMhvUP8Ar6l/9DNfSUf4cfQtbFOtBhQAUAFABQAUAFAGr4Z/5GrSf+vuL/0IVjiP4U/QUtj6Mr54gKAKt/cfZbGWYdVXj69BWVaXLBs0ow56iicS8pJJJyTyTXi8vM7s+hjBJWRC0lbRgaKJG0laxgWonU+Fr1praS2Y5MJBX/dNd1Hax4uZUVCamup0NbHmjZEEiFT0NAFf7FH6t+dPmZPKg+xR+rfnRzMOVB9ij9W/OjmYcqD7FH6t+dF2HKiaKNYl2qeM55pFD80AGaADNABmgAzQAZoAM0AGaADNABmgBc0AFAEVzcw2kLTTuEQd6EribsRWeo219GzwSZC/eBGCKbTW4JpkVvrFjdXPkRTZftwQG+hpuLSuLmV7C3OsWVpceRLNh++ATt+tJRbBySJLvUbayjWSeTAb7uBnP0oSbG5JCpqFrJZm6WUeSBkse1FnewXVrjLPVLS/LLBJll5KkEHHrQ01uCkmXKQwoAKACgBD1pAfNer/APIb1D/r6l/9DNfSUf4cfQtbFOtBhQAUAFABQAUAFAGr4Z/5GrSf+vuL/wBCFY4j+FP0FLY+jK+eICgDO1qJpNJuAvLBd35HNZVYuUGjowkuWtG5wjSVxRgfSqJGZK1jAtRI2kraMC1E6fwZGzG6n/gO1B9eT/hWqjY8XN5K8YnW1R4wyXb5Tbs7cc460Ayni3/uy09SdAxb/wB2WjUNAxb/AN2WjUNAxb/3ZaNQ0DFv/dlo1DQMW/8Adlo1DQMW/wDdlo1DQMW/92WjUNAxb/3ZaNQ0DFv/AHZaNQ0DFv8A3ZaNQ0DFv/dlo1DQTFv6S0ai0DFv/dlo1HoSx28MoyocD3OKAsiWO3SNty5z7mkOxNQMoavp7alZeSjhXDBlJ6Z96cXZkyVylpWivYxT+fIC0y7MIeg/xqpSuxRjZFWw8PS22oJLLMhjjbcu3OW9PpTc7qxKiri6j4flur95opkCSHLbs5U/1ojOyFKKbLGq6M15b26wSAPAuz5+44/wpRnZjkk0LDouzRZbJph5kjbywHAPGP5UOXvXGkrWGaNo0lhctPPIpbbtVUz+ZpzncUUkbu8VmaXQbxQF0G8UBdBvFAXQbhmgLo+bdX/5Deof9fUv/oZr6Oj/AA4+haasUsVoVdBQAUAFABQAUAFAGr4Z/wCRq0n/AK+4v/QhWOI/hT9BS2PoyvniAoAQjIII4oA4fW9Ans5XmtY2kt2OcLyU9vpWfs1c+gwWOhNKNR2aOeaTHB4PvWkaZ6ys1dFrT9KvdUlCwRMEz80rDCj8e9aWSMMRi6NCOr17HounWEem2UdtEPlTqe5Pc1mfK16sq1Rzl1LdBkNcMUO0gN2JoAh2XX/PVPyp6C1DZdf89U/KjQNQ2XX/AD1T8qNA1DZdf89U/KjQNQ2XX/PVPyo0DUNl1/z1T8qNA1DZdf8APVPyo0DUNl1/z1T8qNA1DZdf89U/KjQNQ2XX/PVPyo0DUNl1/wA9U/KjQNQ2XX/PVPyo0DUNl1/z0T8qNA1Jx05pDFoAKACgAoA80+NHifUPD3ha3j02ZoJ72fymmQ4ZECknB7E8DP1q4JN6mNaVlofOtvc6rf3kVvBc3k1xO4REEzFnYnAHWtbI5k2zpf8AhA/iD/0DNS/8CR/8XS0K5Zh/wgfxB/6Bmpf+BI/+Lo0DlmH/AAgfxB/6Bmpf+BI/+Lo0DlmI/gX4gIjM2m6nhQScXAP/ALNRoFpHK/2hff8AP7c/9/m/xp2RF2J/aF9/z+3P/f5v8aLIOZh/aF9/z+3P/f5v8aLIOZh/aF9/z+3P/f5v8aLIOZj4rzUZpUijurt5HYKqrM2ST0A5osg5mXn0LxIoZ30/UQACWJVvxNPnb6j94y1urhSGWeUHsQ5qlJ9xczR2Ok3L3enRyycvypPrg9a9XDzc4XZ30Zc0dS7W5qFABQAUAFAGr4Z/5GrSf+vuL/0IVjiP4U/QUtj6Mr54gKACgBCKBEbW0LtuaGNm9SoJouWpySsmPCgYAAwKCR1ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeNftB/wDIC0b/AK+3/wDQK0gc9bY8R0HUV0fxDp2pSRNIlrcxzMinBYKc4FaNaHOnZntP/C89A/6BepflH/8AFVHIb+2Qv/C89A/6Bepf+Q//AIqjkF7VB/wvPQP+gXqX/kP/AOKo5A9qhknxy0IxOF0rUixUgA+WBnH1p8oe1Vjwgkkk46nNUjBiUxBQAUATWsqwXkEroWRJFZlwDkA9MMCPzBFA07M62bxZpUkMiLp0wLKQM2tmOo9ov5VHKauascZzxVGR2Hh//kER/
wC83869XB/wzuw/wmma6jcKACgAoAKANXwz/wAjVpP/AF9xf+hCscR/Cn6ClsfRlfPEBQAUAVbzUbTT4vNu50iTsWPX6DvWlOlOo7QVzCviaVCPNUlZGKfHGjb9u6cjP3vK4rs/szEWvY8v/WDBXtd/cbNlqdnqMXmWk6SqOu08j6jqK46lKdN2mrHp4fFUsRHmpSui1mszoFoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAMbXvDOj+Jo4oNYsUu44WLxq5I2t0zwR2pp2JlFPcxP8AhU/gj/oX7f8A7+P/APFU+Zk+yiH/AAqfwR/0L9v/AN/H/wDiqOZh7KIf8Kn8Ef8AQv2//fx//iqOZh7KIf8ACp/BH/Qv2/8A38f/AOKo5mHsoh/wqfwR/wBC/b/9/H/+Ko5mHsoh/wAKn8Ef9C/b/wDfx/8A4qjmYeyiH/Cp/BH/AEL9v/38f/4qjmYeyiH/AAqfwR/0L9v/AN/H/wDiqOZh7KIf8Kn8Ef8AQv2//fx//iqOZh7KIf8ACp/BH/Qv2/8A38f/AOKo5mHsoh/wqfwR/wBC/b/9/H/+Ko5mHsoh/wAKn8Ef9C/b/wDfx/8A4qjmYeyieaeNtF07w/4jaw0u1W2tVhRxGpJGTnJ5NezgdaRrTjbY5012FhQAUAFABQBq+Gf+Rq0n/r7i/wDQhWOI/hT9BS2PoyvniAoArX15HY2U91L/AKuJC5/CqpwdSaguplXrKlTlUeyVzxzU9XuNVvXurhyWP3V7IPQV9fh8NGhBQivU/OcZiamKqOpN+hT82t+U5OUs2Gp3Gm3cdzbOVkQ9OzD0PtWNfDwrQcZo6cLXnhqiqU3qex6XfJqWm295H92Vd2PQ9x+dfI1qTpVHTfQ/RsNXVelGquqLlZm5DcRtJHtU4OfWgTKv2Sb1H/fVO6Jsw+yTeo/76p3QWYfZJvUf99UXQWYfZJvUf99UXQWZchUpEqt1FSUiTNAwzQAZoAM0AGaADNABmgAzQAZoAM0AFABQAhOKAGjmT8KAH0AJmgBaACgAoAKACgAoAKACgAoAKAPEPid/yOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAc7413f8IlfbOwUt9NwzXbljX1qFzzM3TeDml/Wp475lfZcp8Jyh5tLlDlDzaOUOU9c8A7z4UgLZwZJCv03f/rr5LNbfWpW8j7jJVJYSN/M6ivOPWGS7tnyMFPqaAIP9I/56xU9Bah/pH/PWKjQNQ/0j/nrFRoGof6R/z1io0DUP9I/56xUaBqH+kf8APWKjQNQ/0j/nrFRoGof6R/z1io0DUP8ASP8AnrFRoGof6R/z1io0DUP9I/56xUaBqH+kf89YqNA1D/SP+esVGgah/pH/AD1io0DUXFyekiflRoGpJGJgT5jKR2wKQaktAzD8TR3MlpF5Idowx8wJ+n4VcLX1M6l7aC+G47mO0cThgpb92G6gd/wzRO19Ap3tqa88qwQvK33UUk1lJ2VzWMXJqKOZPimRJwXiTys8gdQPrXHDEVJS20PV/s1cu+p0rSHyw6YOcYycV3HkvQZ50n92P/vugVw86T0j/wC+6AuS+Yn94fnQFw81P7w/OgLh5qf3h+dAXDzE/vD86AuHmp/eH50BcVXVjgMCaBjqAPEPid/yOkv/AF7x/wBa9vAfwi4nG12DCgAoAKACgDV8M/8AI1aT/wBfcX/oQrHEfwp+gpbH0ZXzxAUAQXVrHeWstvMu6KVCjj1BFVCThJSW6M6lNVIOEtmeG+INDvPD1+0FwrGEk+TNj5ZB/j6ivtcFi6eJgmn73VHxOMwM8PNprTozI8yu2yOPlNPRNHvdev1tbRDjI8yXHyxj1J/p3rlxWKp4aHNN+iOrC4KeJmowPc7Cyi06xgtIBiKFAi/h3r4epOVSbnLdn3FKlGlBQjsi1UmhFPgxnchcZ6CgGVcR/wDPtJTJDEf/AD7SUAGI/wDn2koAMR/8+0lABiP/AJ9pKADEf/PtJQAYj/59pKADEf8Az7SUAGI/+faSgAxH/wA+0lABiP8A59pKADEf/PtJQABYyQPs8lAWLH2SL+7+ppXY7IlRBGoVRgCgLDqBhQAUAN/5afhQA2aNZY2jcZVgQR7Un5jTcXdHNL4QQXoeS7ZrcHOzbgn2JpRUYo9V5rJ0+VR17nSlAybRwBVHkPUb9nH96gVhPIH96gdhfs/+1QFg+z/7VAWD7P8A7VAWD7P/ALVAWHCFR15oCxIBQMKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQAUAQXNpDdwtDcRRyxN1SRQwP4GnGUoO8XZkSpxmrSV0YZ8CeGzJv/suLPoGbH5ZxXaszxaVlNnI8twzd+U27Wyt7GBYLWCOGJeiRqFH6VxznKb5pu7OuFOMFaKsixUlhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAN/wCWn4UAVtTujY6bc3YTeYYmkC+uBnFXTh7ScYd2Y16jp05VF0R5XF441aO8E7XRdQcmIgbCPTHavppZXRcGktT4qnmuNVVTctG9uh6wHLQK4O3cAemcV8u9HY+4i7xTQzzH/wCev/jg/wAaQw8x/wDnr/46P8aAuS+evvQO4eenvQFw89PegLh56e9AXDz096AuPV9x+6w+ooGOoA8Q+J3/ACOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf+hCscR/Cn6ClsfRlfPEBQAUAFACZoAM0ALQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADf+Wn4UADqrqVYAqRgg9xRdp3E0mrM5eDwFoEGoi7WGQlWDrC0hKKfp/QnFehLNMVKn7NvTv1POjlWGjP2qj/kdOVDDB6V556Inkp6H86BWDyU9P1oHYPJT0P50BYPJT0P50BYPJT0P50BYPJT0P50BYcsaqMYoGOoAKAPEPid/yOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAFAHO654ttdJcwRr590OqA4CfU/0relQlPXoelg8tqYj3npHucy3jzU9+fJtdv8Ad2n+ea61go9z03k1C1uZnQ6H4xtdTlW2nT7PctwoLZVz6A+vtXPWwk6eq1R5eLy6dDWOqOmBzXKecLQA2SRY13NwKAIvtcPqfyp2FdB9rh9T+VFgug+1w+p/KiwXQfa4fU/lRYLolRw6hl6GkMdQAUAFABQAUAFABQAUAFABQAUAFABQA3/lp+FAFbUpJodNuZIF3TJExQD1xxVQV5pPYumk5pS2PHotTvUvkuIp5TclwQdxJY56e+fSvoVhIcjutLH09f2PI42VrHsrEmAFgVY4yAcYNfOWPlH5EWP9p/8Avs/4UxAOO7/99n/CgLkvnn+6PzP+FIdxfPP90fn/APWoC4eef7o/P/61AXE8
8/3R+f8A9agLiiZj0j/U/wCFAXJFLE8qAPrmgY6gDxD4nf8AI6S/9e8f9a9vAfwi4nG12DCgAoAKACgDV8M/8jVpP/X3F/6EKxxH8KfoKWx9GV88QFAGfrd+dN0e6u1+/Gny/wC8eB+pq6Ueeaib4Wl7WtGD6nj8kjO7O7FmYkknqTXuRhZWPstIpRWyIy1aqJm5Dd5UggkEcgjtVqC2MpNNWZ6/4b1FtT0K2uZOZCCrn1YHBr5/E0vZVXE+VxNP2dVxRr1gYjJY1lTa3SgCD7HD6t+dF2KyD7HD6t+dO7FZB9jh9W/Oi7CyD7HD6n86LsLInRVjQKp4HvS1HoOyPagYZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAoOaACgCte30FhEJJ3wCcAAZJNNJsTdhLO9gvl82Bty9D2IPuKGmgTuWTUsZkxWGipqRmjgtBeZ+8AN2f8ar63KS9nzfI2ftuTXY1SBj5sY96RiJiP8A2P0oANsf+z+lAC7E/uj8qADYv90flSANi/3R+VABsX+6PyoAUADoMUwFoAKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQBi+KbZ7rw5eRxglwgcAd9pz/StsNJRqpnTgqns8RGTPIy3vX0KgfUuQ0tWqiYuQwtWig3sZOZ614KtXtvDFt5gIaUtLg+hPH6Yr5vHzUsRJo8DFT56rZ0NcZzkVxs8v5wxGf4aBMqf6P/zzlqrMm6D/AEf/AJ5y0WYXQf6P/wA85aLMLoP9H/55y0WYXQf6P/zzloswug/0f/nnLRZhdB/o/wDzzloswug/0f8A55y0WYXQf6P/AM85aLMLoP8AR/8AnnLRZhdB/o//ADzloswug/0f/nnLRZhdB/o//POWlqGgf6P/AM85aBkyW0MihgrDPqaAsSxwJESVzk+9IaRLQMy9a0ttShj8twkkZJG7oQetVGXKTKNw0bTDpsTo7hpHO5iOg9qJS5hRjYu3ayNaSiL/AFhQhfris5q8WkawaUlzbHnge6e7WCOOT7RuwFwcg1zUsLy69T6d+yVNybVrHobg+SA4DHjORnmutHyr8iHav/PNP++KZIbV/wCeaf8AfFAEnmv7f980D1DzX9v++aADzX9v++aADzX9v++aQDlaVhkY/KgZKoYHlgfwoGOoA8Q+J3/I6S/9e8f9a9vAfwi4nG12DCgAoAKACgDV8M/8jVpP/X3F/wChCscR/Cn6ClsfRlfPEBQAhGQc0vMDznxF4KuYp3udKTzYWJYwD7yfT1Fe1hMfCyjV+89XD49W5ZnKNpuoCTYbG639MeS3+Feoq1G1+dHU68N7nSeH/A13dXCT6rGYLZTnyj9+T2PoP1rixeZwjHlo6vucVbFq1oHpiIEUKoAAGAAOgr5/Xqea9XcdQAyQOVwjBW9SKAIdlz/z1X8qNCdQ2XP/AD1X8qegahsuf+eq/lRoGobLn/nqv5UaBqGy5/56r+VGgahsuf8Anqv5UaBqGy5/56r+VGgahsuf+eq/lRoGobLn/nqv5UaBqGy5/wCeq/lRoGobLn/nqv5UaBqGy5/56r+VGgaihLjIzKuPpSGrligYUAFABQAUAN/5afhQAOwRSWIAAySe1HkhNpLUwIvF+izXogWchmO0SFMKT9a7HgMQoc7Wh5cM6wk6nslL/I3mdUXLHArjR6lxn2mL+8PyoC4faYv736GgLk2aBhQAUAFABmgAoAKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQAUAJigAxQAYpWAWmAUAFABikAYoAMUAGKADFABigAxQAYoAMUAGKADFABigApgFABQAUAFABQA3/lp+FAFbU7Vr3Trm1V9jTRMgb0JGM1dOfs5xm+jMa9P2lOVNdUeQweFPEE2pCzewljG7DTn/AFYHqD3r6ueY4VUnNS17Hx0MnxHtFG1tdz2MIywKikkqAM+tfI3u7n2iVko9hm2b/a/z+NAw2zf7X5//AF6ADbL/ALX5/wD16Yahtl/2vz/+vQGobZf9r8//AK9Aahtl/wBr8/8A69AajhHIRy5HtSHYlVNv8TH6mgY6gDxD4nf8jpL/ANe8f9a9vAfwi4nG12DCgAoAKACgDV8M/wDI1aT/ANfcX/oQrHEfwp+gpbH0ZXzxAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADf+Wn4UAMuJkt4XmkbbHGpZj6AUJXaS3GouTUVuzkI/iBateBJLR0tyceaXyQPUj/69dv1CfLdbnqzympGF+bXsdh5nybgCwPTbXDbU8jbQb5x/54v+VOwrh5x/54v+VA7kuaAF4oGHFABxQAZoAKACgDxD4nf8jpL/ANe8f9a9vAfwi4nG12DCgAoAKACgDV8NceKdJ/6+4v8A0IVjiP4UhPY+jK+eICgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAb/AMtPwoAivLZLy0mtpM7JUKHHoRTjLlakVCThJSXQ88j+H2oNfBJriD7KDzIpO5h7DHBr2P7SpqndL3j1KmZRlHRanopiAiCLgAAAfSvG1e55L1I/Ib/Z/L/61BNg8hv9n8v/AK1AWDyG9vy/+tQFg8hvb8v/AK1MLB5De35f/WoCweQ3t+X/ANakFhy24x8x59gP8KB2JVjVegANAx1AHiHxO/5HSX/r3j/rXt4D+EXE42uwYUAFABQAUAPileGZJYmKyRsHVh2IOQaTV00wZ6vpvxZsDaINSs7hLkDDGABlY+oyQR9K8meXz5vd2I5WXf8Aha+gf88L/wD79L/8VU/2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1fILB/wtfQP+eF//wB+l/8AiqP7Pq+QWD/ha+gf88L/AP79L/8AFUf2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1fILB/wtfQP+eF//wB+l/8AiqP7Pq+QWD/ha+gf88L/AP79L/8AFUf2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1f
ILB/wtfQP+eF//wB+l/8AiqP7Pq+QWD/ha+gf88L/AP79L/8AFUf2fV8gsH/C19A/54X/AP36X/4qj+z6vkFjW8PeMtO8S3s0FlHcq8MYdvNQAYJxxgmsK2HnRSchG/PMsELyt91FLGuaTsmxxi5SUV1OZ/4SmRZwzxp5WeVHUD61x069SUtVoev/AGYuXR6nTGQ+WHTbzgjJxXcePawzzpPSL/vqixNw82T/AKZf99UWDmJfMT+8KLDTDzE/vCgYeYn94UAHmJ/eFAB5i/3hQK4qurHAIJoGOoA8Q+J3/I6S/wDXvH/WvbwH8IuJxtdgwoAKACgAoAKACgAzQAZoAM0AGaADNABmgAzQAZoAM0AGaADNABmgAzQAZoAM0AGaADNABmgAzQAZoAM0AGaADNABmgAzQAZoA9C+Ef8AyHNR/wCvZf8A0OvNzH4UTI9blRZY2RxlWBBHqK8n1Em07o5xPCMIuxI907wA58vbyfYmlGMUtD03mk3T5FHXudIUDJt6D2qjyxnkD+8aBWDyB/eNAcoeQP7xoCwfZx/eNAWD7OP7xoCweQP7xoCw4QqBzyfWgLElAwoA8Q+J3/I6S/8AXvH/AFr28B/CLicbXYMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD0L4R/8hzUf+vZf/Q683Mfhj6kyPVNSujY6bc3QXcYYmfb64Ga8yjDnqKHdnPiKjp0pTXRHlMPjXVo70XD3bON2WiP3CPTFfUzyyj7Nrlt5nxMMzxirKbnfy6HrZkLQhwSuQD06V8o1bQ+6Urq5F5j/APPY/wDfIpg2KJH/AOex/wC+RSBMl89fegdw89fegLh56+9AXDz196AuHnr70Bcerbj90j6igY6gDxD4nf8AI6S/9e8f9a9vAfwi4nG12DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA9C+Ef8AyHNR/wCvZf8A0OvNzH4Y+pMj11kDqVYZBGCD0NeSiGrqzObh8B6BBqIvUtn3BtyxM5Man/d/pXoSzPEyp+zctPxOCOV4aNTnUf8AI6QoCMHNcB32G+Snv/30aAsHkp7/APfRoCweSnv/AN9GgLB5Ke//AH0aAsHkp7/99GgLB5Ke/wD30aAsOCADAoCw6gYUAeIfE7/kdJf+veP+te3gP4RcTja7BhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAehfCQga7qAJ5NsuP++683MvgRMj1+vKJCgAoAKACgAoAKACgAoAKACgApAeH/E1g3jSXBziCLP5GvcwH8IuJx1dhQUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgC7pWq3mi6hHfWMvlzJxyMhgeoI7is6lONSPLITVzsx8W9ZAGbCwJ9fnH9a4v7Oh3Fyh/wtzWP+gfY/wDj/wDjR/Z0P5mHKH/C3NY/6B9j/wCP/wCNH9nQ/mYcof8AC3NY/wCgfY/+P/40f2dD+Zhyh/wtzWP+gfY/+P8A+NH9nQ/mYcof8Lc1j/oH2P8A4/8A40f2dD+Zhyh/wtzWP+gfY/8Aj/8AjR/Z0P5mHKH/AAtzWP8AoH2P/j/+NH9nQ/mYcof8Lc1j/oH2P/j/APjR/Z0P5mHKH/C3NY/6B9j/AOP/AONH9nQ/mYcof8Lc1j/oH2P/AI//AI0f2dD+ZhyjZPi1rTIQtjYqSOGw5x+GaFl0L7j5TiLy8uNQvJbu6lMs8rbnc9zXfCCgrRGlYgqgCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKBhTuIKLsAouwCi7AKLsAouwCi7AKLsAouwCi7AKLsApAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAC4o1AMUAJQAUALRqAUwEpALin8gDFIBMUALigBKACgAoAKAFxQAlABQAUAFABQAUAFABQAUAFABQAtHoAlABQAUAKBmgBKNQCgAoAKACgAoAKACgAoAKACgAoAKACgAoA9G8FaVocvgzUNV1XTY7o2sshJIy21VU4HP1ry8XOoqqhF2E9zQ0S18E+LZLiys9EltpUj3mTG0gZxwwY8+xqKjxFCzcrid0cHB4X1O9m1EWEP2iKwlZJX3qvTPOCfQdq9D6xCKjzbsq5Bpnh/U9Ytbi5sbfzYbcZlbeq7eM9zzwKdSvCm7SerC5vfY7b/hWJvP7FPn7+NQyn/PTHru6cdK5+Z/WuVS07fIXUt+MtF03TvCOhXdpZxw3FwqGV1zl8x55/GpwtSUqslJ6AnqZ1l4C8QMLa7m00/ZzIjPGXG/ZkZyvXp+NaTxlLVJg2T/EfSbDR9ctYNPtUt4ntt7KmcE7iM/kKWBqSnBuTvqEdSp4a1TwxYWMya5pL3k5k3I6qDhcDjlh3zTr0q0pXpuyB3O58QWvgvw5b2k13oCut1nYIlyRgA85YetcVF4iq2oy2JVzKsfD+k674Q1i/wBM0kG5e4kWzHR0Hy7R1x3NXKtOlVjGctOo72ZyWseDtb0O1F1e2gWDIBeOQOFJ6Zx0rupYmlUlyxepVxdJ8Ga7rVqLqzsx5B+7JK4QP9M9aKmKpU3yt6iuZmpaXe6ReNaX9u0EwGdrc5HqCOCPpWtOpGouaDGjU8HeHR4l1wWsjMltGnmzFeu3OAB7k1jiq/sYXW7E3Y7O41HwDY6odFfRo2RH8qS58sEK2cHLE7jg9TXCqeKlH2lxanK+L/DdvpeuQQaQ/wBpgux+5iRxIytnBTjr1GP/AK1dmGxDnBuppYaegrfDrxMtt532FDxnyxMpf8vX8aX16je1w5kZOl+HdV1mS4jsbRpZLf8A1qlgpXqMYJHPBrapXp07cz3Hcvz+BPElvZrdPprFWIGxHDOM8DKjms/rlFu1xXRFqvg7XNGsReXtlsgyAzJIH2E9N2OlVTxVOpLljuNNDrTwT4hvre2uLfTy0FyoaOTzFxjGcnnj8aTxdGLab1QNq5KPAPiQ37Wf9n/OFDmTzF8vH+90/DrS+uUeW9xXRQm8NatBrUekS2hW9l/1aFhhxzyGzjHBrRYim4Oaeg7jG8P6mmuDRWtsagSAIt69xu65x0pqvD2ftL6Bcni8Ja3Pq0+lx2W68t0DyR+YvCnGDnOO4qHiaSgp30YXLbeAfEqWRuzpx2AFjH5i+Zgf7Oan67Q5rJiui54fsrabwVrNxLopupYg+27yn7nCA9yDx14BrOvJqvFc1loDNCL4eSSeCzd/ZJv7aJ3LH5y7Sm7g46fd96zeNtW5W/dC+pyepeHNV0iygvL218
u3nIEbh1YHIyOh44rsp4inUfLFjuJeeHtU0/S7fUrq28u0uNvlOXXLZGRxnPSiNenOfInqBreA/wCyLjXP7P1eyhnS6G2F5M/JIOg+h6fXFY41VFDng9gkbth4CQfEG4tJ4d+lQr9pUN0dW4VPwOf++awni/8AZ018WxN9DF1HRT4j8S3Vv4X0yNbO2xGXQ7UJGcsST3OcewranVVGmnVerGvMztZ8I61oMAnvrQCEnHmRuHUH0OOlbUsTTqvli9R3uVrrw/qdnpFvqs9tss7jHlSb1O7IJHAOR0qo14Sm4J6oBbjw7qlrpVtqUttttLoqsUm9TuLdOM5FKNenKTgnqguaDeAvEcbSCTTwgjjMjM0q7cDPcHrweKz+u0dLMLo5vOQDXUAUAFABQAUAFABQAUAFAHq3w/upLH4e6rdQwiaSGaV1jIJDkIvHFeRjY81dImW5p+E/FWpa9qE1neaJ9khERYzRh1APTByByc8Y9KyxFCNJJqVxNWKXg6ySy/4TCxt2aRYp2jTJyx+RsfU1eIk5OnJ9h9ih8N4JY/CevO8bqrqQpZcZIjOf51eMlF1I2YPchX/khn/bQf8Ao4Vov99X9dB/aNrVo4pbDwLHOAY2ngyCMg/uuB+eKwptp1WvP8ye5T8U6rr1t8RNOtrOS4W3byvLiTOyQE/PkdD3+mKdCnSeHk3uNWsZHxZ/5GSz/wCvQf8AobVvl3wMcTgG+630NeiUen/FT/kFaD/wP/0Ba8vL/jmREd4Xu5rH4S6rc20hjmjeYo46qflGRSxEVLFRTB6sSxvLm++DurSXlxJM6eageRizYBU9T9aU4KGKiooNmb3ia40zT9H0pLi+1SytsAQtpwxkhRgMcenQVhRjOU5cqTfmI5T4k6hBqNtpjLaX0MqFx5l1bGLeuB0J684P4114GLjKSuioifCa4jj1u+gYgPJbqyep2tz/ADp5knyxfYUjm9T8P6mvii400WkrTy3DbMIcMrNkNn0wetdFOtD2SlfYaZ1/hbwqPDfjy1t7y4tppntJJYxECNpyBnnvjd+tceIxHtqLaVlcTegtnqevN8WJbV5rk2/nurQknyxCFODjpjGDn1olCl9VT6hpY6XRUhj8e+JvIwMxW7OB/f2nP9K56l3QhfzF0RjeBNY1G88O69cXV5NNLCzPG0jbtp2E8Z7Z7VriqUI1IKK3B7kGiXt1qXwl1mW+uJLmRRMoeVtxxtU9T7mqqQjDFRUdNh9Rdf1C7074T6JLZXMtvIywqXiYq2NhOMj3ApUacZ4mSkr7hbUm8d6zqNl4f0Ca1vJYZJmV5GRtpchAecdsnpRhaUJTmmtgSNDxJtHxC8KNgZPmjP4VnRX+z1PkJbGLcW0zfGyJ1icoNshbbwF8ojOfTNbRlFYNq+v/AAR3XKbek/8AJWNe/wCvOL/2WsKn+6w9WLoZngLWNR1HxfrUV3eTTRAMyo7ZVSJMDA7cccVri6cIUYOKG1oQeHwB4C8XjHHnXI/8doqv97T+QPcbDe3n/CmpLgXM/nrKVEgkO4L5uMZ64xxVSjFYy3QNLkmiwnxj8MzpeQbqzlWNcnsGBB/75JH4Uqz+r4nnWzDZmV8UdQRtUs9HgOIbGEEqP7zDj8lA/OtsvjZOo92NHCIzI6ujFXUgqw6gjoa77J6FHsur+I7s/DBNWQBLu6hSNmH8JY7Sw/X868WlRTxPJ0RmlqZOhPNZ/B+6n0sst5mQu0XLD5wCfqErSslLFpT2B7kvhW4u9R+Hutf2xJJLbhZBFJOSTtCZPJ6gN0oxCjHER9mPqrFTxEryfCLQmVS23yS2BnHysP51WHajipXHsyfxHDJB8NPDkUqFJFmtgysMEHBqaD/fza8xLck+KGvalptxZWVldPBFNE7S7MZfnGCfTGaeAowneUlsEUeU9K9YoKACgAoAKACgAoAKACgDo9A8a6p4csXs7FLYxPIZD5sZY5IA7Eelc1XCQqy5pXFa5oXPxP8AEVxA0ataQlhjfFCdw+mSazjgKSetw5TG0DxRqPh28mubRkk8/wD1qTAkPznJ755PPvW1fDwqpJ9AsbNx8TdduFnjZLMRTIU2CI/KCCDg5znnvWKy+krPW4cpijxLfDwv/wAI9tg+xZznYd/3t3XOOvtW31ePtva3GP1TxVqOrabY2M/kpHZbfJaJSrAhcAk5pU8NCnJy3uKxtr8UdeFisHl2hmAx9oKHcffGcZrF5fTve/yDlOe1/wAQ3viS8jur5YVkjj8tREpUYyT3J9a6KFCNFNJgjJxkEVsM3Nd8U6h4igtYb1YAtrny/KQqeQBzkn0rCjh40m2uothtr4nv7Tw5c6FGsH2S4LFyyHfzjODn29KJYeLqKo3qgC28T39r4cuNCjWD7JcFi5KHfzjODn29KJYeDqKpfVAaej/EPWNIsUsylvdwRgCMTg5QDoAR1A96yqYKnUlzJ2Cxj694h1DxFeC5vnX5BtjjQYVB7D+tbUaEaKtEaVijZXtxp95Fd2krRTxNuR16g1rKCmnFhY7VfivrQtwjWlk0mMeZhh+OM4rg/s6nf4hcqOVk13UpdbGsNdN9vDhxKO2OMAdMY4xXYqEFT9mloOx1LfFXWjblBa2Ky7cecFbP1xnFciy6F9xcqMPR/F+q6Ld3t1C0U094QZnnUsSeeeCPWt6mFhUSWyQWI9I8UX+iWF7Z2iwGK8z5nmISeV28cjHBoqYaNSSk3sFgsfFF/p/h650SFYDaXO7eWQl/mABwc+3pTlhozqKpfYLCX/ie/wBR8P2uizrALW22+WVQh/lBAyc+h9KIYaMZupHqOwuseKL/AFyysrS6WAR2f+rMaEE8Ac8nsKKWGjTcnF7iJdW8Yarq9/ZXsxhiuLI5haFCMHIPOSc9KmnhYQi4rW4WNiT4p686xhYbJGU5YiMnf7cngfSsll9Pa7DlMy38catba/dayiWv2q5jWOQGM7cDGMDPt61pLCU3BQbegWKmi+J7/QdSub+0WAzXAIcSISOW3cYI71dXDxqQUX0C1x9p4r1Cy0rUNOiWDyL9naYshLZYYODnilPCwclLXQLFnQ/HGqaFpjadDFbTW5LMomQkrnr0NTUwkKsudvULHVfDe1bSdNu9dvL2CPT54zmMnDAox5P64x61x42SnJU4p3Qpa6Hneq6hJqurXd/JndcSs+D2B6D8BgV6VKHJBRKKdaAbk/inULjw1FoLrB9ji27SEO/g5HOf6Vzxw0I1Oe+orDvDni3U/DLSCzMckEhy8MoJUn1GOQaK+HhV+LRjtct69491bX7I2TpBbWzY3pAD8+OxJ7e1RRwUKcua92K1h2ieP9X0PTVsIUt54Uz5fnKSU5zjgjIoq4OFSfM73C1yvq/jbV9csYbS9+zlIplmDJHtYsM4zzjHNVTwkKb5lcLWKviDxJfeJbiGe+WEPChRfKUqME55yTV0MOqN1EdrGNWwBQAUAFABQAUAFABQAUAFAwoEFABQAUAFABQAUASQRGe4iiBAMjhAT2ycUpOyuB3p+Eupjg6pYj/gL15/9ow/lZPMc/4m8JXPh
f7L9ouoJ/tG7HlA8bcdc/WunD4n2zdlsUmc8CD0NdOiAWlcBOvegdwJA6nFHkIWi4G/4Y8KT+KHuUt7uCB4ApKyqTuBzyMfSubEYn2DV1cT0F8O+EbzxHeXltDNFA1pjzDKCeckY4+horYpUUnvcbdhNP8ACV7qPia50NJY0mty++RgduFIGfXnIpzxMY01Va3FexbHgiY2Gr3X9pWxGmSPG6hT+8KKCcfnj8Kj62uaK5XqHMUrvwvcWfhS28QNcRNBcMAsQB3DOep6dquOJi6rppbDvqHiTwtc+GhZm4uYZvtSll8sEbcY65+tFDEqtey2C9yr4f0SbxDqy6fBNHE7Iz7pASOPpV16qpR57A9CvqunvpOq3VhI6yPbyFGZRwT7VVOp7SCkC2KdaDAEHoc0LyELS6gFHoAmRnGRn0oeoCk8Y7UaAJQAUAFABQAUAFABQMKBBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAWNP/AOQlaf8AXeP/ANCFRU+Bgex+NtD0jVr20fUtdXTnSNgiFlG8Z68142Gq1IJqMbkJnE22jaFZ+M7O0F1LrVmYTJthTzC0nOFIU9O5/Wu2VWq6LlblZXQ9Bt9Gt9X+1Wuo+F7Wzsh8tvJlPMYeuFHynv1rz3VlCzjO7Juct4T0/RofBGq32padDd/ZLmX5nQF2VAuBntz/ADrpxM5urGMXa6Q2TXo0nxT8Pb7VotHgsbi037PLABBTB6gDIIPQ0o+0oYhQbuGzNHRdFgsvCWn3WjaRYalcTIrztcsAWyOcEg8g8Y4xWdWq5VWqkmkK5xHj+3sINXhNnpc+nSshM0UkYVGOeGXBIPcHHpXfgpScGpSuUhPhzqP2DxhboThLpWgP1PK/qB+dGOhzUvQJHfxRp4RXxBqTABbnUotn+6xTP/obflXnNutyw7Incsx2CaL4j8Sa/IuIjbRup7HCkt+qrS5/aQhTXcL9DkPDVna6h8PvEGoXVrDLd7pnEzoCynYG4Pbkmuus3CvCKfYb3F1v/kjGk/8AXSP+b0of75IFuO+K33ND/wCuL/8AstPLvtDRjfDP/kdIf+uEv8hW+P8A4PzCWxmeMv8AkctX/wCvk/yFaYb+BEa2Ok8B6NpqaNqPiPVLdbiO03CONhuA2rljjoTyAM1zYyrJ1FShoS9zZ01tG+IWl39v/Y8Nhd24BjkjAyuc4OQB3GCKxqRq4Sabd0w2Zk/2fY6x8Knu4LKBNRsDiV44wGYoeckcnKnNac8qeKSb0f6hfU0NQ8PadBpvhvw+baFL6+dPtE4jHmBFG5/m68niojVm5Tq9EFzozpNtBfRaVD4St5NKKgPdkxnBI/un5j7nrXL7Rtc7nqK55P4x0aLQfEtzZW+Rb4WSIE5IVh0/A5FexharqU03uWtjBroAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtJFivbeRzhUlRmPsGBNTNXi0B1vxE1/Tdf1Cyl02czJFEyuSjLgls9wK5MDSnSTUkKKsVPAet2Wg+ITcX+VhlhMXmbc7CSDkgduMVeMoyq07R3BnZ6b4k8LaLrN5dNrt5eyXfzGSRWdIxnIQYHv6dBXBOjWqQS5bWJszn7HxDpVr4H13Smuybq5nmaECNsOrYwc446d66J0Kkq0ZW2SKtqR6L4h0y0+HWq6RNcFb24MvlxiNjncABzjHaqrUpyxKqW00B7mlpeqeF5tJtvI1Wfw9fRgecYMgSHGDkYKsD1rKrTrqo21zIRmfEHxNYa61jbWDtOlruL3DLt3kgDj8sn3rXBUJ07uWlwijjrW4ktLuG5iOJIXWRfqDmu2ceaLiUegeP8Axjpuu6JbWemzs7mYSSgxsu3CnAyRzyf0rzsHhp06jciUrE/ibxzp+peCVsbW4Zr6dI0nQxsNo4L8kYPIx+NTQwk41uZrRBbUy/DniLTLDwHrGmXNwUu7nzPKTy2O7KADkDA5FbV6U5YiM0tBtakeqa/ptz8NNP0eKctfQuhePYwAALZ5xjuKUKNT6y5taMLai+P/ABBpuurpQ0+cy+RGyyZjZcE7fUexqsFSnTcuZAjN8D6rZ6N4mivL+UxQLFIpYKW5I44FaYynKpT5Y7gzqNQl+HGp6hPe3N7dmad977RKBn2G2uSCxcIqKWiFqQ6F4l8O6Zc6rojtI2g3ZzDKwY4ygDBuM4Pr2xTq4etOKq/aG7lmDW/Cvg3Sb0aFeyX17cjCk5OCAcZOAABkn1NS6dbETXOrJCs2YngDxNZ6HPfW+qSEWVygJJQuN49QPUE/lXRjcPKaXJugaG674vW48eQazaZltbMqsKkFdyj73XpnLfpRRwv7hxlux20OlutX8GavfLq9zrV9Cdg8yyEkiBiBgcL3+h5xXLGliIR5FH5iszzrXLy1v9XnnsopIrUkLEskjO20cZJJJ564zxXp0YShBKW5RnVqAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAZoGFABQIM0AFABQAUAFAwoEFAwoEFABQMKACncApCCgAzQMKBBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHU6z4UvFXT5NI0q8mhmsIpZHjRnBkIy3P5cVy0sRH3vaSs7sVxviTw19ivb97KMR2tlFbmVXc7g0gHTPXnP0ow+I5lFS3dwTKUHhjUbiS3VfIVJrQXnmvJtSOLOMue1U8TBRfrYdzTvfCEgstFhs/ImvLoTvJPHPuiZFIw27oAB1rKGKTcpS0SsK5c03wlZhdGW7a3uvtmovE0trOWR4hHnGR0IYH3qJ4qbcraWXbzC5hWPhe9v7eKdZrO3W4dktkuZwjTkHGEHfnjPrW88TCOn3juZsOn3E2qR6dsCXLzCHbIcbXzjB9Oa2lUioc/QDWfwhfx3sts9zYL5EfmXMpuB5duM4AduzHsOtYrGQavZiuZuqaVcaRcJFcNEyyoJIpYnDJKp7qe9a0qsKiuguaVp4euNUstKSztolnuvtDCV5/9aEI4xj5cfrmsXXUJScnorBcmXwRfMsMi6hpRgmOyKYXY2vJnHljjlv0pPGQ7O4XKlt4Zu5RO1zcWdikM5ti13NsDSjqo4Ofr0q54iK2Td1fQLixeFdQa4vYp3tbRbOQRTTXMwSMOeig9yetOWKgopx1v94XJ5fDV1p9vqcV5bQyTwQQyrIlxxEHfAIAGGz09utR9ZjKUXF6NvoFxL7wZqdhHdmWayeW0TzZreK4DSLH/f246URxdOTW9mFyNPCWovbCQSWguGh89bIz
gTtHjO4J9Ocdar61TUttNrhcSLwpfTWUU4ms1mmhNxFaPMBNJH13BfoD3pPFQUuW17aXATwposGva0LO4nEUXlO5O8KxIU4xkHPPJ9garEVXThzJDbNR/B4udH0iW1urCKe4EqO8tzhbiQPhRH68fh0rBYvllK6dvyFcybTwxe3CyvNLaWUccxt995MIw0o6qvqf0raeJhFqyuFyjLpl1b6sdMuFENyJREwc8KxOBz6cjmtVVThzrYLlybwzqUFnqN08aeXp8/2efDZO7IHHHI5H51msRBuMe+oXJz4SvYprpLu6sbSO1ZElmnn2oHZdwQHHLY6+lT9ajZNJu4XK994c1DT4L2WcRYs5UjmCPuI3jKsPVSO9VHEQnZLr+gXK99pNxp13Ba3LRLLNHHJjd9wP03eh71cKsZxckO5bn8Lanbw6tK8abNLcJcYb1/u8cjBB/Gs1iYNx/vBcePCl+J5o55bS2jgSN5p55tiR7xlVJx97HYUniobpN/8AAFcztU0y50i7e2uQm8IHVkYMrqRkMpHUGtadSNSPNEZ02seC3+1gaZJaDdaxzJaNcfvpPkBchT754/KuWniklaae+4rmCmhXj3OlwDyt+por2/zcYJIG7jjpXR7eNpS6RHc3NJ8PW8wsFvbMASJe7pVuCfMaIcfL/Dg/nXPVryTfK+xNyLw34QmvrzS5L57VILoiQWz3GyaWLuyr1xTrYtKMlFfMd9DmLhBHczIv3VkZR9ASK7FqrjI6YBQAUAFABQAUAFABQAUAFABQAUAFABQwOh8Qa6bttPGn3lwqQ2EULhWZAHUEHjv9a5aNBLm51q2wsbN5rukarcaxayXzW8N9b2oS5eFmAeIDIYdefWsIUatNRklqr/iKw6fW9Cmi/shL2VbOXS47P7W8BykiOWBK9dpz2oVCqv3ltb3sFmFvrmh2NpYaUt9LPB9lurW4uVgYbDKQQyqeSMj8qJUas5Opy21TsFmR6dquh6IujW8epNdC21F7meVbd1UAxlRtB5Pb9ac6dapzNxtdfqFmSab4isJNL0yOXUILJ7FSkqS6eJ2kUNuBjYg4Pse/NTUw8+aVo7+YrHO2+rRP4zi1adnEP24TuzDLbd2eQO+PSupwaoOHWxXQ1tG1+0hn123kuI7dL+fzoLma2EyKQ7EB0IPBB/A1jUoytDS9l6CaG6p4smtr23/su8iuPJt/JeVrNEjYltx2IV+VfrzRSwqaftFYLElj4ks0ttONxMRPFHf+dtiIAaYfLjHqfTpSlQleVl1iFjNt9VtI9I8P27O3mWd880w2n5UJQgj16HpWsqc3Ob7oLG6muaM76hcw30VncyahLOZpbHz3liJ+UR5GFP1xXN7GrorXVu9hWJdWudO8QWmqhbqeOye+juku0tHkUMYtpjZRyDxwelEFOjKN1rba/wCIbCeIr+y06TUtPLyh5NNsYoVeMhvkbcQ3907cU6MZyjGa6N/iFjOn1/T5PFPiK+Er/Z72zmhgbyzlmZVABHUdDWqoz9lCFtmO2hrN4usZJV1UajFC4twps109TOJQm3AlIPy+/pxWCw1RPk5evfQVirYa3pA0m2jv9RS6s47YpJp91aeZMsmOkUgAwucEZPAqpUainZKzvv0HY53wnqNtpfiK3urxykASRHcKW27kK5wOvJrrxEJTpNRWugFybVLCNfDEMdyZU0yRvOcRMOPODAgHrkDNZqnO9RyXxf5BY2/+En06+juIBqFvZbL+edJLnTxOssUjZ4BBKsP1rneHmrO17pdbCscl4h1FdV167vYpJWR2AR5AAxAAAJAAA6V20KfJTUZFHZjxno899ZpcBxZXFu76iPLPM5Cdu/MY6etcP1Spytrfp6E2MzTtc0+eHUbqe7t7LU571pzPPZ/aMxEcKg6Bga0qUZxaSV1bv1Bo1LS+0/W/Gl8EkkuNJ1GyX7UxjKeSY1BBbtkbD04+as5QnSoq+kk9PmD2OG1rUW1jWby/bIE8hZR/dXoo/AAV6FKmoQUBrY7SPxlpUrabFc7/ACLmF11bCH5n8tYwenP3c8etec8JNKTXTYLFWz8V294NXhuLmCzkur37VDNcWgnjxjbtZcHB2gYNazw8o8rSvZd7BYwPFOpxarqKG3naeKC3WBZDEsQbGc7UAG1cngV0Yam4R95WBHRvq+gJr1t4iTU5HmtrdFFn9nYM8ix7RhugXnn6VyqnW5HS5d3uIh07VNDkk8PahfajJbzaWgjktlt2YuQxIYMOMc896upSqrnhGN1IdmOtPEmlxR6erzOPJ/tDf+7bjzSdn5/pSnh5tydv5RWEsdU0KbVNF1y71J7aayhiimtBAzEsgKgqRxtOc0pU60YSpqN0+odDi7h1kuZnXlWkZh9CSa9CKaSTKI6YBQAUAFABQAUAFABQAUAFABQBMbW5W2FybeYQE4EpjO0n69KlTi3ZPULgbW4W2Fw1vMIGOBKUIUn2PShTjflT1C4v2O68ppfs0/lrjc/lNgZ6c470c0b2bQXEe0uY5RE9vMkhG4I0ZBI9cYp88WuZW+8Lj/7Pvd6p9jud7LvVfJbJX1HHT3qfax7oehCY3CbyjBM7d2DjPpn1qrq9kxD1tLl32LbzM+QNojYnJ6DGKXPDqx3JoLAyJeea7QzW6BhC0TFpGzjbwOD9amc0refmK5bvdAn02W8hvZlimt4klRQjES7scA44xnnPfiohXUkuXqFzNa2nWBZ2glELHCyFCFJ9j0rZTi3ZMLim1uFt1uDbyiBjgSmMhT+OMUueLdr6hchqhmxeeH7iH+zGtXF5FqKjyHjUjL5wUIPRgawjiIvmvpb8hXI9T0Sax1G5tLctffZcCaW3iYojdx+HTNOFdSipS0C5m7HEYk2NsJwGxwT6Zra6u1cC3aX+paTM4tLq5s5HwrhGKE+mRWc4QnG8lewFrxBpF9puq3aXLTXPlyAPdlG2uxAP3j359amjVhOKtZBczhaXJtjci3mMA6y+Wdo/HpWjnDm5bgNMEokWMxSB2wVTacnPTA70+ZWbvsA5bW4eJ5VgmaOPh3CEhfqe1JzirLm3C5F1pt9wJZ7S5tdv2i3mh3DK+ZGVz9MilGcZbMLjPLcRiTYwjJxuxxn0z61V03ygSR2d1NKIo7aZ5Cu4IsZJI9cY6e9R7SNrtgSQ2Ye2vJZJvKktwuImjbLknBGf4cdeaPaXcUle4XIntLiKBJ3t5khf7sjRkK30PQ1XOm7X1C5NHJqNnYyCNrqC0usK+NypLjoCehqGqUnrugKZrSwGrqeg3WneSwV543to7hpEibagcZAJrGFeM7rrsFzN8qT5P3b/ALz7nyn5u3Hr+Fa3QXLj6PeR6OupvERbmcwcqQwYDOSMdO2fXis1Xg58gXK0FrcXTMtvBLMVGWEaFsD1OKuU4x+Jj2CC1uLmQxQQSyuBkrGhYj8BScoRV29AuREFSQQQQcEEdDVrXURpaTolzqtwI1DxRmORxM0ZKHYpbGfwrGrXhBd/ILlGO1uJLY3KW8zQL96QRkqPqelaOcVK1wuEVtPOjvFBLIkYy7IhYKP
cjpQ5RWjdguLDaXNwjvBbzSqgy7Rxlgv1x0odSMXaTsFyGqAKACgAoAKACgAoAKAHJs8xfMzs3Ddj0zzSd7aAeja1/bJvtVuPtEaeGmtlWPed0Dw4XCxj+/1x6GvMp+zcVG3v3+ZJNef2qmsazc3sjHw01lIIvnHkPGUxEqDpuzjpz1qY8jhBRXv3AdDq19H4lsLRbuQW0ehBxEG+TeIickdCcgflR7KLpuTWvN+odCDw3f3N2nhm8u7h57lZr4ebK25sCLIBJ7Zp1oKLnGK00BmdF4i1dvCemzHUrnzpNWZHk8w7iuFO3P8AdyTx0rV0Ie0at0C2pd13TbnWLDVLLTYfOmi16V5I1IGxWjwGOegz3rOnUUGpT/lAseIb+50+HxRLZ3Lwym4so/MibBx5YBwe3SlRpqbgpLTUOpDf3ErafqN55rfaZPDtrK8ob5i+/wC9n16c04QSaVvtMOpPrLTNP4hlvWke0k060aMs2QU3Lv2/ju/GppLSPLvdgW9YkZF1qR7e+bS3s2WN5bpPsZUqNnlKF+9nGAOc5qaS+HXW/bURXvEvLjR7vz/tdnGmmAefFKsthMoQYAVh8rHpxyDTjaM1bXXbqM83vLC509oVuY/LM0SzINwOUboeK9WNSMk+XuUdP4S1a4s9C1wIUJtIPtNsXGTFKTsLL6HBrjxVNSqQv1Ey9YLrk+jeHT4fkmEKO5vDC+Ns3mZJl9tvr2rOfs1Oaqr09PIXVkmr2B1/S7mLQolnji1uZ2WNgAisg+b2XOeaKc/ZSTqfygtDB8cHPjjUMHP7xOc/7C10YX+ArlLY6nUdSurnxf4lsJrmSSyTTJtluW+QERqQQOmck89a5I00qUJJa3J6GjpdrcpLawP9vurdtO8tZ/ORLR90Zwixj77duee9ZVJLVqyd9uv3gYVhcxjQLbxHO4F/o9rJYGN/vGXhYj+AZvyrolF+09ktpNP5dQZr6W7fZdAksItQls0tV894bpI7UPz5vnAgnOc5z+FYTteSla9+zv8AIDhNCijuvGlqtvMlsjXZaJyA4QAkrjPB7AfhXo1W1Q1XQrodT4jgun8GXxmttSVo72OX/iYXAlk28gvtH3Fyce9ceHa9tGzWq6ErcxvCUMeuWF74cuJRGryR3cLMcBSpAk/NCfyrfEt02qsfQpmtJqF9rmmavP4fMwvTfqClu22T7KqbYwvfGRk49axjCNOcVV2t+JPqWruSDbqy3ro8qWWnLqJBzmQS/PnHU4xms4qXu2Wl3b7gIdWXWV1HVptSuFXw688YVZm3RyRbxtEIB4O3uKun7Llior39fy6gXtekkjtvED3FvfmweBlie4ukNsckeWYVAznpgD3zWdJaws1f0f4geaX+n3WmyrDdx+XI8SyqNwOVYZB4r1oTjO7gUeloNcGq+Hp4pnXQ47CE3R8wCFV2fPvHrjGM+1eU/Zcs01719CTNtNOn1VPCN1p0e+ztJ5BK+4AQgT7gG9PlrSU1T9pGW7/yDYr6/Lez+FtSEcszwQ63OJVD5CocFQRnpuOfrVUVFVVf+UaG+EGuz4fuIoLa8mia8Us2mT+XcxsF4LA8Mn1PWqxSXtbtrbrsJ7mhqUGqfY9Tg0K6kudRGp7rt7XbHKy+WNuduOA2QccZBrCm4c0XVVlYDmfGjxt4jOWR5lt4Vu2Qg7pgvz9OM/1rtwifsttG9BrY7QDWD4hvJoJH/wCEdbT3FttceSV8r5Qo/vZznv1rhfs/ZpP476/eIqWP9qnUtAnsJWXw5HZxecQ4EKqFPmiQdN2c9faqlycs1L47v/gAT6S+7TNFfRoNSe3R3aT7HcpHGr7yT5wIyRtx17VE01KSqNfNfkL1G6ZJPOm2xt7sWh1KZ4ptIuB+5Jb/AJaqQFZe4J7VU1bWbV7Lfr6DPOtXQR6zfIJkn23DjzUUBX+Y8gDgfhXpUneCdrFFOtACgAoAKACgAoAKACgBdzFQuTtByBngUrIBSzFAhZto6LngfhRZXuA2nZAFFgDmgBQxGcEjIwcHqKVkADJOKegFu50u/s0le5tZYkil8iRmHCyYztPvjms41ISas/NBcqEk4yTxwOauyAUu5QIWbYDkLngfhRZXuAeY/lhN7bAchdxx+VFle4DaYAKNOoGlpmi6vqyS/wBm2VxOg4kMfC/QkkA/SsalSnB/vHqLYq3VrdafcSW1zFLBMvDxuCp/H2rRSjUXMtSivVbCFpWAkUTtEWUSmOI5JGcIT/LNJuKfmBHVAKHYKVDMFbqoPB+opWQDaYDmkdixZ2JbqSSc/WlZANpgOV2RtyMyt6qcGk0nuA2mApZioBJKr0GeBSslqBZ+x3rQTEwz+XbKGkDAgRBuAcHpmpU4XWu4DLq7mvJVkmIyqLGoVQoVVGAABThBRVkBDuYKVydp6jPBp2QAGYAgE4PUZ60WTAMnBGTg9eaLIBUkeMko7ISMEqxHH4UNJ7gWHs761IZoJ4i0ImBCkfuz0bj+E+tRzwlpfYCO5tLizZFuIWiZ0WRQw6q3Q/Q1UZKS91gRbmKhdx2jkDPAp8q3sAu9ghTc2wnJXPBP0ostwAO6hgrMA3DAHGfr60WTAFkdAwR2UMMHaxGfrRyp9AG0wCgAoAKACgAoAKACgDY8MQ2N1r9vZ6hGrwXQaAE5+R2GFYfQ4/OsMS5KnzReqB7HQ2Hhyxto7C11O133oiub+5XJVmjj+VI/YMQT61y1K85Nyg9NF95Nw0iy0vxDFp98+lW9oRqaWksUBby5kZC3IJ4Ix1FOrKpTcoqV9L6hsZ2kaXZ3OlX80turyRanbQIxzwjOQy/iK0q1Zqas+jKb1KnitrGPXLmysNPis4bSaSLKsS0mD1Yn6HHtWmFUuRSm73EjqNG0PTJl0/T7yx06J7m18xxLOzXjsVLB1C8IvAIB7da46taavJN6P5CZiW2lWckvg5Tbqft//Hxyf3v73HP4eldDqzSqu+3+Q76Fq5ttK0K2tpX0mK9a+vbhP3jsBFGkuwKmD97vk1mnUq3XNayX5CNfVNFttW1i7jl3K03iBIGdWP3PJ3EAdM8dcVjCrKEE1/L+oJlG90zRLi0ufLj0qKW2uIhEtjPJIzIZApWXI647+taRq1ItXbs+/wCgXYl/ZaPdXfiTTLbSILT+zo2khuEdi+4MAc5ONvPTtRGdSKhNybuBBqtvpVrqOoeHodD3m1hwl7GWMwkAUmR+cbOeeOlVTdRxVXm+QeZo3ug6HbzXukEaapgt2KSpO7XnmKudzLjG0+nYVnGtVaU03+gXPOVOQK9TRotHUeIZJYPDHhuGBmSxe1aRtpwrzbjuz6kVyUVGVWblvf8AAlF7TLee8K3HiS1W7gh0aSe1Vmw7IjDbuI57kAnsayqSUbqk7Ny1AsWdhpA0uw1Ke00ZG1KR3eK7nkQRxhtuyIDv3ye5qJTqObgm9P61ERw6PpunNqcn2fTpLaO9MMNzqkzBNgXJRUX5mfnriqlWnJJXd7dEFy1eW1lpVp4t062soPJElqEMjMceYRjv0UkkfrmoUpTdOo3rr+ADr3QdCt5r3SCNMQ
28DbJlndrvzFUHcy4xtPp2FEa1VpT11fyA5bwxZWlzLf3V7D58VjZPc+RkgSMCAAcc455rsxE5RUVF2u7XGzWtYtK1G2l1iTQvIW0s5ZXgjLLb3LhwqlecgDPzfhWMpVIS9nz3u7X6oPIuaRpukay+lajNpcMCTPcxT20TMI5PLjLB1ycj069azqVKlNSgpX21+YnoR6VZaRrttpN5/ZEFru1UWkkUTsVkjMZYbsnr705zqU3KPM3oFyFLDS9dsbxLbTYdPe11CC3jljdmLJI5U78nk8ZquapSkryvdXDYt6no2iGHVbKJdMhks1P2d7eeSS43KwBEoIxz39DWUK1Vcs9de+wXZT1VdI0/UdR0WPw+JxYxbluELGUuoUlpOcbDnBx0HStYe0lGNTntd7f11DU0vEFvbalqWug28cUsVrZBZEZursgywzg4BwPYVjSlKEYtd2BSmstIuNW1fw/FpMUAsbeZorxXYzb41B3Pk4IPpjvWilUUY1XLdrQCxDY6HNrVpof9jQgXGnLNJc+Y/mLIYt4K84A4/HNJzq8jq82ztbyuGpDp2maVeaPZ29tY2VzeSWu+eGeV4bwyEE7os/KV6EDuKc6lSM3d2SfTb5gc/wCF7S21DVXsLqFZHuLeWOEnI2TbcqR75GPxrpxEpRgpp9hs6qXwvpVtb2t09sHTTbaT+1FJOHmESuoPPq+O3SuP6xUk3G/xPT0Fcdbi10211ALZQyb/AA3FO/mM53EnlevCnrx6cVMrykm39qwEkiabqOvaNo11pcMputMi33TOwkT92xXZg4GMfjmqXPGE5xk9GBxnhmGxuPEFvaahGHt7gmDcTjYzDCt+BxXbiHJU7x3WpTOisPDVjbR2FnqdrvvNtze3C7irNFECqx+wYgn1rlniJu8oOy0X37k3uM0q00vxBFp962k29oV1OK1ljgZvLmjdScEE9RjqOuadSVSi5RUr6fcFzOsNMtJdL1WaS3Vnh1K3gjJJ+VWkIZfxGK1qVJKUY36P8gbK/iw2MWuXNjp+nxWkNpM8e5WLNJz1OfTnHtV4ZS5FOUtxrYwTXQMKACgAoAKACgAoAt6cLY6hD9ruZLaANuaWOPey45GB9azq83K1DcDU1bxRd3fiyXW7OV4XDYgzglUAwAR0ORnI9zWdPDxjS9nL5hbQguvE2p3Utq/mRQC1k82FLaJY0V/72B1P1ojhoRurbhYlu/F2sXkPkySwJF5qzFIrdEBkU5DHA656+tEcJSTv8hWRkXdzLe3c11cMGmmcySNjGWJyeK2hBRjyrYZtW/jPWrWOBYpYA8ChFlNuhkKDohYjJX2rB4Sm29Nwshlp4v1iyhSKCaBRG7PETboTFuOSEJHyg+lEsJTk72FZDLXxTqtnHKkcsLh5WnHmwK/lyE5LJkfKfpTnhqUtbBZEM/iLVbguz3XzPdC8LKgU+aBtDAjpx26VSw9OLtbbQdkT3virVb+ERSSQRqZFlk8mBY/NcHIZ8D5uamGFpxd7BZFQ61ftcahOZh5moIyXJ2D5wTk/Tkdq09jCyjbRbBYtT+KtXubB7OWdCskYiklESiWRB0VnxkiojhaSlzWFZDpfFusS2T2zTx5ePyXnEKiZ0/ul8ZIpLC01LmsFkZl3f3F6lsk7KVtohDFhAuFHY46/U1tGCg3Zb6jsXtN8Salpdq1pC8MtsW3iG4hWVVb1APQ1lUw0Kj5mtQsMk8QapNeXV1LdF5rqA28pKjHln+EDGFHHamsPBJRtsFiTTfE2paXbLbwNA8SOZIhPAsnlN/eTPQ0VMPCpLme7Cw608U6raRTIJo5vNlM5a4hWUrIerqWHBpSw1OVrLYVkE3inVZ5LuSWWF2vIVgnzCv7wLnBPH3uetJYWmreQWQ6XxbrE1i9q88XzxeTJOIVEzx9NpfqRQsLTTv8AqFkZ2naldaVdi6s5dkoUqcqGDKeoIPBB9K1qU41FaYzQPivV/t8V2s8aGKNokiSFViCN95dmMYPesvqtPl5bCshJfFOqyXkFyJYojbxvHDHFCqxxqww2FxjnPXrQsNSUbNDsitY63qGmwQwWswSOG4F0gKA4kC7QefbtVzown8S12+QWIo9UvIra6t0l2x3UiySgKMllJIIPbknpVOlBtNrYLF+98VarqFnJbTSwgTACeSOFUkmA6b2AyayhhqcJc1gshtz4p1a7sHs5p4ysiCOWQRKJZEHRWfGSKI4anGXNYLIZdeJNTvIZIppkIlhSCQrEoZ1QgrkjnIwOaccNTi72CyJbvxXq95ZyW000X75BHNMsKrLKo7M4GSKUcLTg+a2wWKya9qKanHqKzKLqOIQq+wcIF2Yx06cVfsI8vJbfULFq38W6rbWUVtHJb5hj8mGdoFM0af3Vc8jrWbwtOUub5hYybW6msruG6t32TQuHRsZwR0rolGM001ox2LsviDU5odQhe5Jj1GQSXI2gb2H8vwrJUKas7fCKw+HxJqcNwJlmjZhaiz2vErKYh0UjGD9aHhqbVrdbhYYmv6kmpW2orOBdW0SwxP5a/KgBUDGMHgmn7Cm4uHRgVtPFs+oRfa7mS2h3bmljj3suORgfWnUvyPlVwNbWPFF1e+LJNbs5XhdG2wE4yqAYAI6c85HvWdPDxVL2cgS0K934m1O7e2bzYrdbaTzoktoViVZP72B1P1pww1ON+twsiS88WavfQGCWWBYjIsxSK3RAXU5DHA656+tKGFpwdwsjJu7qa+vJru4bdNM5kkYDGWPJ4FbRioxUVsBDVAFABQAUAFABQAUABOAT6DNAHRt4XC+I00n7WcNafafM8v8A6ZGTGM+2M1y/WH7NTt1t+Irk3/CKW0Wi215c6hLFLc232iN/sxa3HGQjSA8N+HepWKlzuKV7PvqFyWy8FfaIbOKe7nhv72ISwotozxICMqHkHQn9KmWMs3ZaLz/QLlePwtB/ZdhPc6l5N5fyvBDbmLIDrJsO5s8KPWreJlzNRV0tQuR+IPDtro0biO9uGnil8t4rm1MPmD+/GckMtOjiHUeq09fzBO5V0TSbXUUnkubqdPLKqsNrbmaWQnuF7AdzV1qsoNJLf5DZrr4J2anqUE91cNBZRRyn7PbF5pBIMj93njHOfSsfrnuxaWr7vQVzndUs4LC/eCC7FzCAGEgQqQD2Know7iuilNzhdr+vId9DYl8KCHU76E3hNnbWQvVuRH/rFYDYAM9STjr2rFYq8E0tW7WFcmPhG2Fy+lf2of7cSEym38j91uC7jHvz97Htil9alZT5fd9fxC5Vg8MifWdF0/7WQNStkn3+X/q9wJxjPPSqeJtCU7bOw7mZpOlzazq0GnW5USSsRubooAJJP0ANbVaqhDnYX0ubz+DopVt5bK8unha7jtZjcWbQspc4DqD95a5linqpLp0YuYZdeFLX7PfjTdUa8u7CZIpozBsU7n2Da2TnB4NOOKkmnOOjQXHTeFLBBqcEWtGW+02B5biH7MQpK9QrZ5weCaI4mbcbx0ewXFt/CFvd2Dtb388tylqblmW1JthgZKebn739aTxcovVaXtvqFxLDw
nZXE2nWV3rBt9Rvo1ljhFvvVUYZAZsj5iOcUSxU/elGN4oLiaf4QjntLWa8vLiJrx2W3EFm0ygBtu6Qj7oJpVMXZvlW3mFzKttOntfFUGmzeWJ471YWLLvTO8DOO49u9dDmp0XNdh3Nm58O6egub/U9WNsrajNaiOC0zllbqBngd8dqwjXnpCEb6X3Fcw9U0afTdfm0jcJZklESkcbycbfpnIrohVUqXtB3Ni48LafEmpxRayZb7TIGlni+zEKxXGQjZ5wTg8VhDEzbi3H3W7CuTp4FZttmbqf+1Xg84RC0Ywg7d2wy9N2PwzxUPGWd7aX7hcis/CdhONKhm1h4r3U4BJBCLbcFJzwzZ4HGKqWKneTjHReYXM6Tw+Y49FZrjnUpXjI2f6orIE9eeue1a+3vzWWyuO5sp4WadbXSjdRKjavPaeaLcb8omdxOeQcfd7etYfWGnKpb7KFcov4Xtrq1SXR9SN7ILxLORXgMQDv91lOTla0WJaf7yNtLhcfqXhKO1069uLW8uJpLDH2hZrRokYZwWjY/eAP+NKGK5pJNaPzuFzN0fR4b63vL29uza2NptEjrHvdmY4VVX14rarVcJRjFXbGzo7zTLaPT4xYzW80SaDJMZmthmUeb1xn5X5xnnGDXHGpLmfMnfm7kpmfq3hO30qyYyX8wu1hWUb7YiCbIB2xyZ5bn8a2p4pykly6f10Hcjv8Aw1p9gtzaS6wF1a2h814Gi2xE4B8tXzy2D6c044mcrS5fdegXGSeFwmv6jpf2skWdo9z5nl/f2oHxjPHXFNYlump262C5Nd+FLey0iO4uNQmjuJLUXKE2x+ztkZ8sSD+L8MZqFipSnZLr31+4Lhf+FLfT9KE0+oTJctai4UtbH7O+RnYsg/i/DrTjinKei0v31+4Lk1/oEb3k9zf3kdvZWtpbGR7e2AZmdflVUzyeDk5qIYhqKjFXbb6hcZF4QtppmlXVtumtYtex3TQHO1WCsrLngg+lU8U1o4+9ewXMzWdHt7CzsL6xvHurO8D7Gki8t1ZDhgRk1tSquTcJKzQIxq3KCgQUAFABQAUAFABQAEZBHrQB2SeLdLFyuoyaZdNqX2P7IzCdRGBs27gMZzj1964XhqluVNWvfzFZjNL8V6fplnEYrW+S5S38mS2jnAtZm2kb2U5OTnJA7054acna6tf5oGh9v4yt/s1m91HqLXdpAIRFDdlLebaMKXUcjtnHXFS8JJXSas/LULGRNrsc9no8EtmJRYSSPKshykweTeRjqPSt1RacrPcLF/VfEtncaFPpdkmouk8qyf6dMJFtwpztj7+2T2rOnh5KanKy9OoWINC8QW2n6PdabdJfIs0yzCWxmETtgY2MT/DVVqEpz51+INXL0/inSbvU3upLK/tmkgiQTW1wBLCyDGEY9VIxnPORWaw1SKtddd1owszF8SayuuaoLpIpERIUiBlYNI4UfecjqxrooUnSja9xrQ3tbv7jT/BOm6VcKiahKB5hVwzC3Ri0YbHu2ce1c1GnGdaUun6k2uyB/FenG+k1pNPuBrckJjJMq+QHKbTIBjOcdqpYapyqDa5b/Mdh2neLNLtZdKvbjTLmXUNOt1tkKTqsbKAQGIIzuwfpSnhalpQi9G7hYwNE1Z9F1q31KNA5iYkoTjcpBBGe3BPNdNWl7SnyMfQ3ZfFdnE9p9lj1OdY7uO5ka9u/MbCHOxOwHuea5lhpNO7Wz2RNijaeIvs8ustHEVk1GZJI2ZhiIiXzPm9fwrWeHbUU38K/QdjrL+G3sLbxFqU1h9nlvbV0Fx9tSWKZ3I4hUDcQTyc9MVxQlKUoQvs+35iM7/hONOe5W4ltNSYvbm3kt1ugIIlKbSY0x1+vvWzwdS3Ldd/NhY09FS3kutH1u7sgy21qoa+S8UQoqAgF0I3eYBxgcZrCo5LmpQeje3UDAsfFtqljawXqanmzZ/KFndeUkyFtwWQfpkdq6ZYV3bjbXvuh2MGLVNviKPVpIs7boXBjVvRs7QT+XNdLpv2fJfpYdi3q+vpqVn5C27xn+0JrzJYHiT+H6j1rOnQcJXv0sCRHq2s/2n4ok1eFPs5eaORFkOdpXaOSO3GaunS5aXs35glodnqMFvZWniPUZrD7NLfWzILj7YksUruQcQgDOD1JPTFefByk4Qvon/VyTIk8awTL9rmi1Fr/AMkRGJbsi1Zgu0OVHOe+Oma6Pqck+VWte/mOxlxeIkj1XQbw2zkaXBHEy7xmQqWOR6ferX2D5Jq/xBYtWniXSxbaf/aGnXM02nTyS2/lTBVYM+/D5HY+lZyw9S75Xo1qFiWHxnFFfW9x9ikIi1Oa/wAeYOQ6kbenUZ603hG01fpb7gsZmk+Im0mwlihhLTm9iu0cn5Rsz8pHvmrq0HOV79LBYvat4ntLywu4rWPUjLeHLi7uzJHAM5IjA656ZPQVnTw04yV7WXYLGfo2rWlrZX2najbyzWV3sZjA4WSN0OQwzwep4NbVqUpSU4OzXcbRfuPFNkYmgtNPmigGlvp6K8oYjL7t5OOfce9YrDTveT1vcVidvFlhDpl3FY219FJdQeSbVpw1rESOXReue4HY0fVJuS5mv1CxW1HxDpN/9rvjpUh1a7h8t2kkDQxtgAyIuM7uO/SqhQqRtDm0XbcLMtyeLdKee81BdLuhqN7ZtbSt56+WmUC7lGM84HWs1hqllG6snfzCzGWviuwstPdba2vo5pLYwNaCcG0LFdpfaec98etVLDTlLVq1736hYSHxVp9pps0drbX0cs1qbdrTzwbQMV2lwp5z3x60vqtRyu2t736hYjfxRY3rXNvf2VwbG4gt4z5UgEkckQwHGeDnng01hpK0oyV7vfzCw2XxTbiGe0trKSOy/s57C3VpAWXcwYuxxySR0FUsPK6k3re/3BYprrNlLpukWF7ZTSwWLTtII5Qhk38jB7YOPrVypT5pyi97AYZroKCgQUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAYoAKACgAoAKACgBMD0FAC0AGB6CgAoAKACgAoAMD0FABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUDOr8P+ANX16JbnC2lowysswOXH+yo5P14rjrY2nTdlqyXI6yP4Q2u395q9wW77YVA/XNcrzGd/hFzDv8AhUVj/wBBa7/79pR/aM+yDmD/AIVFY/8AQWu/+/aUf2jPsg5g/wCFRWP/AEFrv/v2lH9oz7IOYP8AhUVj/wBBa7/79pR/aM+yDmD/AIVFY/8AQWu/+/aUf2jPsg5g/wCFRWP/AEFrv/v2lH9oz7IOYRvhDZ4+XVroH3iU0f2jP+UOY5vXPhrq+lQvcWrpfwLy3lqVkUeu3v8Aga6KWOpzdpaMdzjDXcMSgAoAKACg
AoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoGdj8PPDUevay892m6zswGZT0dz91T7cEmuLG13TjaO7Jbse3qoUADoK8UgXpQA0OrdGB+hoAdmi6AM0AIGBGQQRQAuaADNABQAhGaGB5H8TvDMVjPHrNogSO4fZOo6CTqGH1wc+/1r1cBXbXs5FJnndekUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHsnwnRB4YuHGN7XbbvwVcV42YN+1XoTLc72uEko61/yAtQ/69pP/QTV0/jj6geU6Ratb/8ACKXC6ZJYNPPGDqC3Bf7Rx90oDxu969Kbv7RXvbp2KZt6Z4z125vYbt7UyafPLKhiWAKI1XOCsm7LHjkYrCeHppON9dAsO03xLrlzNoctzeWUltq3mkwRxYaJVU/LnPPbmnKhTSlZaxtqKxRttf1ay8M6KunIkMDW0ssrW9uJmQhyBmMtkJ6mqdGDqSUtdvIaRa/t7UF1ttYW8inhXQ/tZhjRhG+DjAycj5uc4zjj3qVSg6fJaz5rXuFtBsfi7xHBpl7PcRhh9g+1QzPaiMI2RwBuO5SDwaboUnJKPezFY7rQv7RbTI5NTmhluJf3n7lNqqpAIX3x61xVOXmaiLqadQBy3xERH8D6hvx8oRlz67xiunB39tGw1ueD17xYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAei/CrW47W+udJncKLnEkJJ43gYK/Uj+VebmFK6U10FJHrea8ogZNFHPC8Mqho5FKsp7g8EUXs7oCidC01rSztTaJ5NkyvbJz+6ZehHPaq9pJNtPcCCPwxo1vqLajBp8Md6xZhKFyVY9WA6A/hTdabjyt6AYOk+BHs9bgv7mayIt2dh9mtfKaYsCMvzgYB6KAK6KmKUouKT17sdzbm8IaDcW1vBJpsXl2ylYgCylVJyRkHOM9qwVeom2mIsHw9pJntpvsEO+2iMMR24CpgjbjoRyevrUqrNJq+4FeDwfoFtDcww6XAqXKbJRg/Muc7c54HsKp16krNvYDajjWKNUQYVQFUegFZgOzQB5v8VdcjjsIdGjcGaVhLMAfuoOgP1P8AKvQwFJuXP2KieTV65YUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHRu0UiyIxV1IZWBwQR3FJq6swPTvDvxTVIUt9dicsowLqFc7v8AeX19x+VeXWy/W9LYlxOsj8feGJFDf2vCvs6sp/UVyPC1k/hFZj/+E68Mf9Bm2/X/AApfVqv8rFZh/wAJ14Y/6DNt+v8AhR9Wq/ysLMP+E68Mf9Bm2/X/AAo+rVf5WFmH/CdeGP8AoM236/4UfVqv8rCzD/hOvDH/AEGbb9f8KPq1X+VhZh/wnXhj/oM236/4UfVqv8rCzGt488MKpP8AbEB9lDE/yp/Vaz+yOzOc134qWcULRaNC88xyBNKpVF98dT+ldFLL5N3qaIaj3PK7u7nvruW6upWlnlbc7t1Jr1owUFyrYohqgCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAzQAZoAM0AGaADNABmgAzQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHQJ4L1+SNZEscq4DKfNTkH8a5vrVMnniUdS0LU9IVWvrVokY4DZDDPpkd60hWpzdluNNPYza1GFABQAUAFABQAU7AFIYUCCgAoA3LXwjrd5axXMFlvhlUMjeaoyD9TXO8TTTsTzxRBqPhzVtKg868s2jizgsGDAfXB4qo4iEpcq3GpJ7GVWwwo7DCgQUDswoEFABQAUAaum+HNV1e3a4sbXzYlbYW3qOfxNYzrwg7SJcknZk9z4Q120t2mlsG2KMna6scfQHNT9ap7BzJmHXQUFABQAUPQAo3AKACjYAoGFG4goAKACgAoAKACgAoAKACgAoAKACgAPQ/SgD3nTb23g0qASQhiYUyzNgfdH5V4M0927HI3FXM/V7iyXSLpr2RRavHhtzfKxwcfU59K0jDnacdWEJPofPZlia8uxcXF4pWUhREWwBgegr7XkkqcPZxjqutik7yd2XZLp7ZESKIugjDeZNLtz7ZPU1x06Eaz55OzvayRrKbigGomXyVtofMklj83DPtCr05NL6koczqSsk7d7vyD2rlZJCHUmPlokH751LMkrhAozjqaawSs5t6X6a38/ITqvaxC1/JNc2jW8ZYssitEXwAwx1PtW31WNKM41HtbXfRk+0k2rE41IlNn2c/afN8ryt3fGc59MVi8Gk783upXv8A8Ar2r7aiHUjGsiywFbhGVRGrZ3FumD6UlglJpxknF3d9rWBVbXvuRXl7KLS6ikjME6Rh1KvkEbgMg1th8NB1ITi7xbttboKc5WaejLlvdi6kcxJ+4U7RLn7x74Hp71yV6HsopSfvPWxcJ87dtkWK5iwFNbge2eFLuG38M6f5kQc/Z15J4Arw60G5O7OaUlGTuTXVzafZppZ5FW1KkSEsNuz0PrSUOe3K7kRnfY+fNTt0XUIjBcXKxT3LDAlOAvJGPTtX1+DquVKXPFNxXY1lHVaiPfLYQ3CFJJDAygb33M+7nOcfX8qmOFeJlGa0Ur9NrD9pyXQtzfKyuFD7F8pi6Pg5Y8D8qKOEaacnrrv5BKpoyvNe3qxXpCgeXOFB3j5Rxx05/wDr1vTwuHlOmm91cl1JWbLUuoukkiJArGEAy5lAwcZwM9TXNTwSnFScrc22n9WNJVWna2w5dQaW5SKCAyK0ayFy2AFNS8HGMHObtZ2t5gqrcrJF2uK5qwoEeo/DaeOHRJmkj3/6Q2BnHYV5eLi3NpHPVaUtTqpbuOWcyQnyypz8j8qcVzKKkuVu5lzrdHh/i+e2bxqPsDqbZw5YRn5WYKM/rmvo8DS/2OfMtdPzN7vmjcw4NTklW3ke1KQzsEVt4Jyfb0rsqYGMXNKd3FXtYaqtpNofBqL3EnyW4aPeUyJAWBHcr2FRUwapw5nLWye
2mvmCqtvYitb26Nq7vDvfzmRfnGAMnqccAetaVcLR9qoQlZWvtf8Aq4ozlYeuqDyZGaLMqSCIIjhgzHpg1m8D76V9Gr6q2w/a+6D6nJCLgTWpR4YxIQHyGBOODin9RhLlcJ3UnbYPatX5kPa8uAiE2gVmyfnlAVR2yfU+lSsLT5pWldLsrt/IfPKy0GDUy8Nu0UBd5nZAu8cEe/pVfUbTleWkddv61F7W6Wgz+1ZQju9oVSKTy5T5gODnHHr1FU8BTeinq1daB7VroaZrzTYKBBQAUAFABQAUAFABQAUAFABQAHoaAOnvfEyXiRxnzBFEiqqY4JAxk18xissxleVrpR9TzquFqVG9UUbTU7ZrkPqKyS26bvLgHKqxHDY6E16NHBVMNBUqVmnu76nTCk6SSh82cnbXS28lyxiuj5spcYgbjgCvqa1GVaEbSWiS3HGXI3dFW5cy3jTLBKwdAv721ZjHjutdNGMY0lBySs76Na+pMm3K9hsLy2wheKObzUj8pg1s+1lzkH61dRQqtqTVm7q0lp3+8mN0k0tQckvHN5U08oTY/wBotWIbnOR6Yz+VKCSTgmorpaQ33FDSRfZ3hSbzIg+4G0YK27HGB0FCUJc0ZtWdvtBdqzSF3MMTBLj7UJTKSbZtpyMbfXGKVo29m2uS1t1f1Hrut7iMzS+ZNIlwLkujoVtm2rt6D36mmvctCMly2a1avqJ6ttrUJWe6Sdp45xLJGI1CWz7VGc/WiEY0uWNNqyd3drVg25XbLliVW9lEMc0cEg3FHhKhWHoenPpXJi7umnUacl1v0/4BdPSVkaVeYbhRa4HSN4jV9MtLHMixwRBGAH3iO9fO4/L8XiJvla5ThrYerOWj0KdvqcD3kf24SPYo4Y2ynh8dzXThsBVwkFGlZye7/wAjSnQdJe7ucxqN1HPfJIkNyFiuGfAt25HPTFfVYWi4UpKTjeS7lzldryKszxTahFcmG72quGT7O3zHnH5ZNdFKM4UXTbjfvcUneXNYhjRY7BrfZdM7SK2427dFIwPyFayblWVRuOz0uTa0eUdM5kF4qx3AWdxImbZ8hhjg+3FKEVFwbafLdb9wet0NlJaaWRbZmabBYyWbNsbGCV/wNVFR5FGUvh2tJa+oO9723LdrKiXgYRXOGjSIZtyuCD1PYda5cRBzpWbW7e9zSLtI1K8robBQBvaZr/8AZ+jPYqXVpJS7Mo7YAx+lePmWFxNbSjpc5cRSnN+4VZNU3uUR5IoWGJCnDOPT6Vz4PK54WPtNJT/AijhXT9/qY/iC6s5tehnsraeO2ii2hFhLclQDyPcGvrMvhU+rSjUaTlbr2NdbpvcyFdVs7ODyrrMEisx+ztzjPT867nG9WdS695d0K/upW2I8s9zG8kMp8uTf5y2rCRhnoe1a2ioPlktVazat6hfVXQj7jHs8mV1WdpVR7Z8MD2b6U1yXu2tY20auvQl3sOVGEM8zbogJY5FP2dlCsOOn92pnNc0YrXRp63uv8xpOzBi939tkLiVWiWMNDGxUHdnAHU+/1oXLRVOO2rer6WDWV2SXcvnXMUyW0r7E27JrZio9x71FCEYRcXK13e6a+70HJ8z0GWxMJt90dwwhkdxi2YZDD9OtVXSqKVpK8klugjpuOkYPa3UXlXOZpvMB+ztwMg4/SpjHlqRnzL3Y23Q2/da7s2wdwDYIzzgjmvGludAVIBQAUAFABQAUAFABQAUAFABQAUAFABQAtHqMSjQLhRoFwo0C4UaBcKNAuFGgXCiyC4UAFAgoAKACgYUaBcKNAuFGgXCjQLhRoFwosguFAgoAKACgYUWQBRoFwo0C4UaBcWjYAouxCUWQ7hRoFxaLILiUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgDQsdHuL63e5Ettb2yOIzNcyiNS5Gdo9Tj8qznVjFpdQFvdD1DT4y9xbkBZXibad2GUAnOO2GGD0OaUa8JbMLle1sLm8uLeGKJt1xII4iw2qzE4HJ4q5TjG9+gC3GnXVtKkbxFnaJZgIxu+Q9CcdKmFWMldMLkHlSeX5nlv5f9/adv59Krmje1wFMEy7cwyDdjblD82emPWmpRfUCW0sbi81CKxiTFxK+xVk+Xn3z0qZVIxjzPYCF4ZY874pEwATuQjAPTrVc0ejC41wYzh1Kn0IwaYm0kIDn2+tAJphQO6DIzigXMgoHcCcUCbSEJAIHc0BdXsAIOfagFJO4tA7oMg96BXQUDujRsdFub62+0CW1t4DJ5SyXMwjDvjO1c9TyPYVlKtGLtq/QLla4sbq1nnhmgkV7dykvy5CH3I4q1OLSaYDk0+5ksZrwRkQRFAzNxncSBj15B6UnUipct9QITBMJDGYZfMAyU2Hdj6daq8e4Fw6Nei/urLy1M9tG0kihs8KATj1OCOKj2sOVT6MCkYZVbaYnDbtuCpBz6fX2q7ruFxh4ODwfemK6AHNAKSYUDuISBjPegUpKO4uRz7UDugoFdBQO6A8Y96BNpBnr7UBdBQO6CgAoAKACgAoAKACgAoAKACgAoAKANq0uLC70JNNvLt7N4Ll545RCZFcMoDKQOQRtGOxzWEozjU9pBXurC6mra+I7CxNlb2Ut3DZRXk0ksbEsXjaNVXdj72SG47ZrCVCcrtpXsgsXbXxHo9vZWcRupmELWcgVo5GZfKI3Dk7R3xtA46nNZyw9Vtu3f8Qsxth4o0yJVXzWgdRbMZjHJ8wjDAp8jAnk5GflPOaJYWpf7wsVk8U2rMsTGU2hspYja7cRmVpi4GM4AxjntVvDSSv1vv5WFY3L3UU0h1l1K7uJfOvrh4hMhzArRFVKgNkqCQMqQP7tYQhKpdRXRfPUNTm5ddsz4v0u/MheCzEaySpG2X25yQGJY4zgFjniuqNCaoyh1YzU07UrW/ePT7m7uNQso7aZ767kUqVXeJEHzHPBXH1cgVjUpyh7yVnpZfmHQ4nUr2TUdQuL2Y/vJ5TI3tk5x+A4/Cu+nBQioroTPZFU7SevY1ZDt0E446DpxQJWE47Y70Cdugoxnnpmgat1D/634UCuKxBOQeg4oKm03dDePXvQRYXjHXnigpWsJxzwD1oEWIEhZZjJKUZUzGAm7e2RwT24yc+1S79DSnY2befTr7RbWxvrySzezmkdWWAyCRHwSOOjAjjPHNYyjUjNzgr3RfU1bXXtKt4IvInuYLe3+0qbFlLfahICELMOM9Ac9McVjOjUbd0m3bXsFi5F4r0yGczyXVxPDJPbSpZmI7bURrggZODg8jHXHrWbw1R6Jd9e4rMhuPEVlLDJapqUkExtwi6hFFKSMSbymWYuQR3z146VUcPNatXV9h6lFNctD4u1TUBdTww3UMscVwsZLqzKAG2jnqDWroy9jGNrtdA6Gtb63BJb3d2zSXMOmwwPBdSDb5t2qlAcHnncDzziME1zuk00tr307IDz+Q5B3MSx5J
Pc16drEztYYSM/j1oM9NhPQH0xQF728h5KnHpQVJxdhv455oMxOMc4zxQNWtqBx+HNADiVO3npQW2nYTj14z0oIa3sxOMHnnigelixCsJt5meYrKpXy49mQ+Tzz2wPzpNyvpsaU/hGUywoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoslsMKBBRZdQCgYUAFABQAUAFABQAUAFABQAUAFAhaYCUgCgAoAKLIAoGFABQAUAFABQAUAFABQAUAFABQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD/2Q==", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "Here are the screenshots of the `index.html` page for both desktop and mobile views. If you need any further modifications or adjustments, please let me know!", + }, + { + ftm_role: "user", + ftm_content: + "fantastic, now call two chrome() in parallel each running its own tab\n", + }, + { + ftm_role: "assistant", + ftm_content: + "It seems that the tabs are already open. 
Let's close the existing tabs and then reopen them in parallel.\n\nI'll proceed with closing the tabs first and then reopening them.", + ftm_tool_calls: [ + { + id: "call_Z0bacXQ2J69R8l7SAavCp8IL", + function: { + arguments: + '{"commands": "open_tab desktop 3\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 3"}', + name: "chrome", }, - { - m_type: "image/jpeg", - m_content: - "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAC
gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE0cjllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAKmoahFp8IeTJY8Kg6k1jWrKmrsyqVVFHPP4jvWfKCJF/u7c1wPF1HscjrSY3/hIr/+9F/37pfWqvcPbSD/AISK/wD70X/fuj61V7h7aQf8JFf/AN6L/v3R9aq9w9tIP+Ehv/70X/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/A
Kxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSHJ4ivVcFhEw9NmKaxdRAq0kb+nalFqERZMq6/eQ9v/rV3Ua6qLzOqlVUi7W5sFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZyHiCVn1V0PSNVUD8M/1rycVJuozz6zvMy65zEKACgAoA1dC8Nax4lnkh0ixe6eJd0hBCqgPTJJAGaTdilFvY3v+FUeNf8AoDf+TMX/AMVRzIr2cuwf8Ko8a/8AQG/8mYv/AIqjmQezl2D/AIVR41/6A3/kzF/8VRzIPZy7B/wqjxr/ANAb/wAmYv8A4qjmQezl2D/hVHjX/oDf+TMX/wAVRzIPZy7B/wAKo8a/9Ab/AMmYv/iqOZB7OXYP+FUeNf8AoDf+TMX/AMVRzIPZy7B/wqjxr/0Bv/JmL/4qjmQezl2D/hVHjX/oDf8AkzF/8VRzIPZy7B/wqjxr/wBAb/yZi/8AiqOZB7OXYP8AhVHjX/oDf+TMX/xVHMg9nLsH/CqPGv8A0Bv/ACZi/wDiqOZB7OXYP+FUeNf+gN/5Mxf/ABVHMg9nLsVr/wCG3i7TbGa8utHkEEKl5GSVHKqOpwrE4pcyB05LocrVGYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADo43lkWONGeRyAqqMkn0A70AaX/AAjeu/8AQF1H/wABX/wpXRfJLsH/AAjeu/8AQF1H/wABX/woug5JdjNkikhlaKVGSRDtZWGCD6EdqZA2gAoAKANHQ5THq0QB4fKn8q2w8mqiNaTtJHZDpXsHoLYKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZxuu/8hib/gP/AKCK8fEfxWedV+NmdWJkFABQAUAe3fAb/kGa36+fF/6C1ZyOilsevVJqFABQAUAFABQAUAFABQAUAFABQAUAVdR/5Bd5/wBe8n/oBoB7Hx4Puj6Vscb3FoEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAFrTJPJ1S0kN41kFmU/alUkw8/fAHXHWk9io7np/9v2//AEVy9/8AANqzOn5h/b9v/wBFcvf/AADagPmeZatKJtXvJRfNfh5mP2t1Kmbn75B6ZrRbHNLcp0yQoAKALukf8he2/wB/+hrWh/ERpD4kdsOleyeitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUABoBnG67/yGJv+A/8AoIrx8R/FZ51X42Z1YmQUAFABQB2PgTx/ceCXvFWyS8t7raWjMmwqy5wQcHsemKlq5pCfKdr/AML6/wCpc/8AJ3/7ClyGntvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIqap8cri80y5tbXQ0t5po2jEr3O8JkYJxtGTzRyCdW62PJOgx6VZgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBNZ3T2V5BdRrG7wyCRVkQMpIOeQeo9qRSdnc7H/haOsf9A3Qv/Bev+NTyIv2j7B/wtHWP+gboX/gvX/GjkQe0fY4++u3v76e7lSJJJ5DIyxIEQE+gHQVSIbu7kFMkKACgC7pH/IXtv9/+hrWh/ERpD4kdsK9k9GOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA0CZy2sadfT6nLJDZXUkbbcOkDMDwOhArx8R/FZwVIvmZR/snUv+gbe/wDgM/8AhWFyOVif2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcO
Vh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/
69z/MVthv40QPn/1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK5UY2I9Z0X7e4mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fz
NGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3H/fCf/E0cqDnl3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHs
UH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOajrjrM0VswVVOC+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYedd
b/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdpWYXQ5J4HYKE5P+zSsx3RP5af3V/KgYeWn91fyoAcBgUAFABQBDc28d1bvBIMo4waE7O4mr6HLv4XuxLhJoimeGOQfyrX2iMvZs3dNsE06JYUO4nJZvU1nKVy4qxo0iwPSgDze7LwXEsUgw6MQQa89ULM+qo2nBSjsym8xJwOTXTCidKhY9I02J4NNtopPvrGob64rZK2h8jXkp1ZSjs2yWbdv4MmMfwkYpmLI8v6y/mtAgy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgBf3n/Tb81oAcqux5aVfrigB4jYEHzGPscUAS0FBQBkeKP+RV1b/rzl/wDQTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKOq6fHqmmz2cvCyLgH+6ex/A1dKo6c1NdDSjUdKamuh45e2k+n3clrcJtljOCPX3HtX0tOUakVKOzPpYVY1IqUdmVia1SByEzVpEOQhNUkQ2NzVpEtnoHgDQWjDavcLguu2AEdu7fj0FeFmuJUn7KPz/yPMxla/uI76vHOEKACgAoAryi33nzNu760K5LsMxaf7P5mnqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg9IbdxlVBHsaLsdkO+zQ/3B+dK7CyFW3iVgwQZFAWJaBhQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMzUdEs9Tw06ESAY8xDg//AF6Dow+Mq0NIPTsyCw8NafYTCZVeWUfdaU5x9BVOTNa+YV60eV6LyNqpOIjaKNzllBNFxWE+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsh6IqDCqAPagY6gAoAKACgDI8Uf8irq3/XnL/6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8AIjab9H/9DavAxn8aRmdRXOAUAFABQAUAYeveG7TXIR5n7q4Qfu5lHI9j6iunD4qdB6arsb4fEzovTbseb6n4Z1bS2JltWliHSWEblP5cj8a92hjaNXZ2fmetDF06nUxm4ODwfQ12qxo5E1rY3l9IEtbaWZv9hCf16Up1aVNXm7GU6kY7s7bw/wCAWDrc6xtwORbKc5/3j/QV4+LzVSXJR+//ACOGti76QO/VQqhVAAHAA7V4rdzhHUAFABQAUAV5LRZHLFiCaBWG/Yk/vtRzC5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlJoYVhUgEnPrQ3caViSgYUAFABQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMDWPFVjo8vkNvmuAMmOP+H6ntXZh8DVrrmWiPJx2b0MI+V6y7L9SvpnjXT7+4WCRHt5HOFLkFSfTParr5dVpR5t0Y4TPcPiJqDTi332+86euA9wjeeNG2s2DQK437TD/fH5UWYXQfaYf74/KizC6D7TD/AHx+VFmF0PSVJM7GzjrRYLj6BhQAUAFABQAUAFABQAUAFABQAUAZHij/AJFXVv8Arzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/AOhtXgYz+NIzOornAKACgAoAKACgAoAia3hkOXiRj6lQaalJdQuyRVCjAAA9AKQC0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8tR9DSF1JaYxDwDQJngt3eyT3c0szEyu7M+fXNfcUaUY00o7I/Nq/NUqSnLdsh+0e9a8hjyHt+hTy3OhWE83+seBCxPc4618RiYxhWnGOybP0bBzlOhCUt2kW5iN/MStx1JArE6WR5X/ngn/fQpkhlf8Angn/AH0KADK/88E/76FADlk2Z2xKM+jikMd9ob/nmP8AvsUDuH2hv+eY/wC+xQFw+0N/zzH/AH2KAuH2hv8AnmP++xQFw+0N/wA8x/32KAuH2hv+eY/77FAXD7Q3/PMf99igLh9ob/nmP++xQFwEznpED/wMUBccryFgDFgeu4UCJaCgoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQB//9k=", + type: "function", + index: 0, + }, + { + id: "call_NmC0xtr0Boz6buWVVjpuiDHO", + function: { + arguments: + '{"commands": "open_tab mobile 4\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 4"}', + name: "chrome", }, - ], - }, - { - ftm_role: "tool", - ftm_call_id: "call_W1ae766eqQMvHBnmVvUoUtfw", - ftm_content: [ - { - m_type: "text", - m_content: - "opened a new tab: tab_id `6` device `mobile` uri 
`about:blank`\n\nnavigate_to successful: tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + type: "function", + index: 1, + }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_Z0bacXQ2J69R8l7SAavCp8IL", + ftm_content: [ + { + m_type: "text", + m_content: + "opened a new tab: tab_id `3` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `3` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_NmC0xtr0Boz6buWVVjpuiDHO", + ftm_content: [ + { + m_type: "text", + m_content: + "opened a new tab: tab_id `4` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `4` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + }, + ], + }, + { + ftm_role: "assistant", + ftm_content: + "The tabs have been successfully opened in parallel:\n\n- Desktop view in tab 3\n- Mobile view in tab 4\n\nIf you need any further actions or modifications, please let me know!", + }, + { + ftm_role: "user", + ftm_content: "you forgot the screenshots\n", + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "call_KSF9MxJi5wAUyE7jrVZ8keHq", + function: { + arguments: + '{"commands": "open_tab desktop 5\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 5\\nscreenshot 5"}', + name: "chrome", }, - { - m_type: "image/jpeg", - m_content: - "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAMfAXEDAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtbaa9uora3TfNK21FzjJqZSUVeWwGmulaZAM3uuwbv+ednE05/76+Vf1rL2s5fDH7wuGPDK8FtZk/2gsK/pk0/3++n4i1HJpel6gzRaZfXP2nazJBdQBfMwCSA6sRnAOMjmpdSpDWa09R6mZYWkmo39tZwlRJcSLGhY4GSeM1tOShHmA1fEfhS/8MNbi9kt3+0BinksT93Gc5A9RWNDExrX5VsCdyxpPgnU9Z0VtVtpbVYF3/LI5DHb16DFTUxcKdTkaFzHNqrOMqrH6DNdLaW4wAJOACT6CnsAFSpwwIPoRihNPYByRyOGKRuwX7xVSQPr6Urq9gO703wRp154BfXHnuRdCCWUKrDZlScDGPb1rz54uca6h0J5jga9H1KNnw5oy6r4jstOvBNDFcMckDa2ApPGR7VhXq8lNyjuJs3td8HWGm+M9J0iCa4Nve7d7OQWXLEHBx7Vz0sVOVGVR7oL3RV8d+GLLwzd2UdlJO6zxszeawOCCBxgD1q8JiJ1k+boCdzlEjklJEaO5HUKpOPyrrbS3GNp7gFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBseGONcVu6wXDD6iF6xxHwfNfmDG6Rfabaafex3tibiaWMCFsA7flIxk/d5Ktkc/LjvRVp1JSTg7ICx/aWieTpCf2Uxa3YG7PA80Y5Gc/Nk884x0qPZ1bytL0FqT6fPZzeLTd2MHk2sNvLIV2heVhbLbQSFy3bJxmpkpRo2m7u6/MZQ8K8eKtHH/T1F/OtcQv3UvQ
XQUDujRsdFub62+0CW1t4DJ5SyXMwjDvjO1c9TyPYVlKtGLtq/QLla4sbq1nnhmgkV7dykvy5CH3I4q1OLSaYDk0+5ksZrwRkQRFAzNxncSBj15B6UnUipct9QITBMJDGYZfMAyU2Hdj6daq8e4Fw6Nei/urLy1M9tG0kihs8KATj1OCOKj2sOVT6MCkYZVbaYnDbtuCpBz6fX2q7ruFxh4ODwfemK6AHNAKSYUDuISBjPegUpKO4uRz7UDugoFdBQO6A8Y96BNpBnr7UBdBQO6CgAoAKACgAoAKACgAoAKACgAoAKANq0uLC70JNNvLt7N4Ll545RCZFcMoDKQOQRtGOxzWEozjU9pBXurC6mra+I7CxNlb2Ut3DZRXk0ksbEsXjaNVXdj72SG47ZrCVCcrtpXsgsXbXxHo9vZWcRupmELWcgVo5GZfKI3Dk7R3xtA46nNZyw9Vtu3f8Qsxth4o0yJVXzWgdRbMZjHJ8wjDAp8jAnk5GflPOaJYWpf7wsVk8U2rMsTGU2hspYja7cRmVpi4GM4AxjntVvDSSv1vv5WFY3L3UU0h1l1K7uJfOvrh4hMhzArRFVKgNkqCQMqQP7tYQhKpdRXRfPUNTm5ddsz4v0u/MheCzEaySpG2X25yQGJY4zgFjniuqNCaoyh1YzU07UrW/ePT7m7uNQso7aZ767kUqVXeJEHzHPBXH1cgVjUpyh7yVnpZfmHQ4nUr2TUdQuL2Y/vJ5TI3tk5x+A4/Cu+nBQioroTPZFU7SevY1ZDt0E446DpxQJWE47Y70Cdugoxnnpmgat1D/634UCuKxBOQeg4oKm03dDePXvQRYXjHXnigpWsJxzwD1oEWIEhZZjJKUZUzGAm7e2RwT24yc+1S79DSnY2befTr7RbWxvrySzezmkdWWAyCRHwSOOjAjjPHNYyjUjNzgr3RfU1bXXtKt4IvInuYLe3+0qbFlLfahICELMOM9Ac9McVjOjUbd0m3bXsFi5F4r0yGczyXVxPDJPbSpZmI7bURrggZODg8jHXHrWbw1R6Jd9e4rMhuPEVlLDJapqUkExtwi6hFFKSMSbymWYuQR3z146VUcPNatXV9h6lFNctD4u1TUBdTww3UMscVwsZLqzKAG2jnqDWroy9jGNrtdA6Gtb63BJb3d2zSXMOmwwPBdSDb5t2qlAcHnncDzziME1zuk00tr307IDz+Q5B3MSx5JPc16drEztYYSM/j1oM9NhPQH0xQF728h5KnHpQVJxdhv455oMxOMc4zxQNWtqBx+HNADiVO3npQW2nYTj14z0oIa3sxOMHnnigelixCsJt5meYrKpXy49mQ+Tzz2wPzpNyvpsaU/hGUywoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoslsMKBBRZdQCgYUAFABQAUAFABQAUAFABQAUAFAhaYCUgCgAoAKLIAoGFABQAUAFABQAUAFABQAUAFABQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD/2Q==", + type: "function", + index: 0, + }, + { + id: "call_W1ae766eqQMvHBnmVvUoUtfw", + 
function: { + arguments: + '{"commands": "open_tab mobile 6\\nnavigate_to file:///Users/kot/code_aprojects/huddle/index.html 6\\nscreenshot 6"}', + name: "chrome", }, - ], - }, - { - ftm_role: "assistant", - ftm_content: - "Here are the screenshots of the `index.html` page for both desktop and mobile views, captured in parallel tabs.\n\nIf you need any further modifications or adjustments, please let me know!", - }, - ], - title: "Index.html Screenshots", - model: "gpt-4o", - tool_use: "agent", - read: true, - isTitleGenerated: true, - createdAt: "2024-11-12T08:33:06.826Z", - updatedAt: "2024-11-12T11:53:45.561Z", -}; + type: "function", + index: 1, + }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_KSF9MxJi5wAUyE7jrVZ8keHq", + ftm_content: [ + { + m_type: "text", + m_content: + "opened a new tab: tab_id `5` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + }, + { + m_type: "image/jpeg", + m_content: + "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVl
y4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE0cjllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA
oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAKmoahFp8IeTJY8Kg6k1jWrKmrsyqVVFHPP4jvWfKCJF/u7c1wPF1HscjrSY3/hIr/+9F/37pfWqvcPbSD/AISK/wD70X/fuj61V7h7aQf8JFf/AN6L/v3R9aq9w9tIP+Ehv/70X/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSHJ4ivVcFhEw9NmKaxdRAq0kb+nalFqERZMq6/eQ9v/rV3Ua6qLzOqlVUi7W5sFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZyHiCVn1V0PSNVUD8M/1rycVJuozz6zvMy65zEKACgAoA1dC8Nax4lnkh0ixe6eJd0hBCqgPTJJAGaTdilFvY3v+FUeNf8AoDf+TMX/AMVRzIr2cuwf8Ko8a/8AQG/8mYv/AIqjmQezl2D/AIVR41/6A3/kzF/8VRzIPZy7B/wqjxr/ANAb/wAmYv8A4qjmQezl2D/hVHjX/oDf+TMX/wAVRzIPZy7B/wAKo8a/9Ab/AMmYv/iqOZB7OXYP+FUeNf8AoDf+TMX/AMVRzIPZy7B/wqjxr/0Bv/JmL/4qjmQezl2D/hVHjX/oDf8AkzF/8VRzIPZy7B/wqjxr/wBAb/yZi/8AiqOZB7OXYP8AhVHjX/oDf+TMX/xVHMg9nLsH/CqPGv8A0Bv/ACZi/wDiqOZB7OXYP+FUeNf+gN/5Mxf/ABVHMg9nLsVr/wCG3i7TbGa8utHkEEKl5GSVHKqOpwrE4pcyB05LocrVGYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADo43lkWONGeRyAqqMkn0A70AaX/AAjeu/8AQF1H/wABX/wpXRfJLsH/AAjeu/8AQF1H/wABX/woug5JdjNkikhlaKVGSRDtZWGCD6EdqZA2gAoAKANHQ5THq0QB4fKn8q2w8mqiNaTtJHZDpXsHoLYKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZxuu/8hib/gP/AKCK8fEfxWedV+NmdWJkFABQAUAe3fAb/kGa36+fF/6C1ZyOilsevVJqFABQAUAFABQAUAFABQAUAFABQAUAVdR/5Bd5/wBe8n/oBoB7Hx4Puj6Vscb3FoEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAFrTJPJ1S0kN41kFmU/alUkw8/fAHXHWk9io7np/9v2//AEVy9/8AANqzOn5h/b9v/wBFcvf/AADagPmeZatKJtXvJRfNfh5mP2t1Kmbn75B6ZrRbHNLcp0yQoAKALukf8he2/wB/+hrWh/ERpD4kdsOleyeitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUABoBnG67/yGJv+A/8AoIrx8R/FZ51X42Z1YmQUAFABQB2PgTx/ceCXvFWyS8t7raWjMmwqy5wQcHsemKlq5pCfKdr/AML6/wCpc/8AJ3/7ClyGntvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wC
Tv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIqap8cri80y5tbXQ0t5po2jEr3O8JkYJxtGTzRyCdW62PJOgx6VZgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBNZ3T2V5BdRrG7wyCRVkQMpIOeQeo9qRSdnc7H/haOsf9A3Qv/Bev+NTyIv2j7B/wtHWP+gboX/gvX/GjkQe0fY4++u3v76e7lSJJJ5DIyxIEQE+gHQVSIbu7kFMkKACgC7pH/IXtv9/+hrWh/ERpD4kdsK9k9GOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA0CZy2sadfT6nLJDZXUkbbcOkDMDwOhArx8R/FZwVIvmZR/snUv+gbe/wDgM/8AhWFyOVif2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFA
BQAUAFABQAUAFABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/MVthv40QPn/1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK
5UY2I9Z0X7e4mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3
H/fCf/E0cqDnl3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOa
jrjrM0VswVVOC+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXY6KS4aRQy/L3+XFJpDTZcpFBQAUAFABQAhGaAM19A06SXzDBgk5IDED8qfPInkRcjjWJkRFCoowABgCkJE9BYHpQB5vdl7e5lik4dGINeeqFmfU0EpwUo7MptNk4HJPAFdMKJ08lj0jTYnt9NtopPvpGob64rZK2h8lXmp1ZSjs2SSl9/HmYx/CRj9aZixmX/wCmv5rTJDL/APTX81oAMv8A9NfzWgAy/wD01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgAzJ/01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgA/e/9NfzWkMciyMeWlX64oAkEbAg+Yx9jigCWgoKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAEcsSTRNHIoZHBVlPQg9RQm07olpSVnseK+LPDE/h69LIrPYSN+6l67f9lvcfrX2WXY+OJhZ/Et1+p8bmGXyw87r4Xt/kc5ur07Hm2E3UWCwu6iwWJLeGa7uEgt42kmkO1EQZJNRUnGnFyk7JGkKUpyUYq7Z7R4Q8NL4f0w+bhryfDTMOg9FHsK+LzDGPFVNPhW3+Z9jl+CWGp6/E9/8jpa4T0QoAKACgCrNdGKQqFzj3oSJbI/tx/uD86fKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMWLebzkJK4wcUNWGncmpDCgAoAKACgAoAKACgAoAi/5aj6GkLqS0xhQBmajollqZDTIRIBgSIcH/69B0UMXVoaQenYgsPDWn2EomVXllH3WlOcfQVTkzSvmFetHlbsvI2qk4yNoo3OWUE0XFYT7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyHoioMKoA9qBjqACgAoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFABQBBcW8N3A8FxEksTjDI4yCKcZShJSi7NEThGcXGSujgtX+F9tOzS6Vdm2J58mUb0/A9R+te5h89nBWrRv5rc8WvksJO9J28mc7J8NfEKNhfskg/vCbH8xXorPMM1qmvkcDyXEJ6W+8u2Pwt1GVwb6+ggj7iIF2/XArGrn1NL93Ft+ehtSySbf7ySXpqd7oXhbTPD8Z+yRbpmGGnk5dvx7D2FeDisbWxL/AHj07dD28NgqOHXuLXv1NyuU6woAKACgAoAQgHsKADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAoGKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAYGseKrHR5fIbfNcAZMcf8P1PauzD4GrXXMtEeTjs3oYR8r1l2X6lfTPGun39wsEiPbyOcLvIKk+me1XXy6rSjzboxwme4fETUGnFvvt9509cB7hG88aNtZsGgVxv2mH++Pyoswug+0w/3x+VFmF0H2mH++Pyoswuh6SpJnY2cdaLBcfQMKACgAoAKACgAoAKACgAoAKACgDI8Uf8AIq6t/wBecv8A6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8iNpv0f/ANDavAxn8aRmdRXOAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBF/y1H0NIXUlpjEPAOKBM8Fu72Se7mlmYmV3Znz65r7ijSjGmlHZH5tX5qlSU5btkP2j3rXkMeQ9v0KeW50Kwnmz5jwIWz3OOtfEYmMYVpRjsmz9GwkpToQlLdpFuUrv5jVuOpYCsTpZHlf+eCf99CgQZX/nhH/30KADK/8APCP/AL6FADlk2Z2xIM+jigB3nt/cX/vsUDuHnt/cX/vsUBcPPb+4v/fYoC4ee39xf++xQFw89v7i/wDfYoC4ee39xf8AvsUBcPPb+4v/AH2KAuHnt/cX/vsUBcUTOekYP/AxQFxQ8hIzHgeu6gRLQUFAGR4o/wCRV1b/AK85f/QTWl
D+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/wDobV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAeceKfh/cXN7JfaO0eZWLSW7nbhj1Kn39K97A5vGnBU63TZ/wCZ8/jsndSbqUuu6/yM/RfhxqE10r6s0cFspyyRvud/bI4AroxWdU+W1DV/gjDDZJPmvW0R6pHGsaKiAKqjAA7CvmW23dn0qSSshjwl2yCv4pmgdhv2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dv70f/fsUBYPs7f3o/8Av2KAsH2dv70f/fsUBYPs7f3o/wDv2KAsH2dv70f/AH7FAWD7O396P/v2KAsH2dvWP/v2KAsPSAAfMEJ9lxQFh4RVOQoB9hQMdQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgCL/lqPoaQupLTGFAEM88VtH5k0qRoP4nYAUJN6IcYSk7RV2Mtry2ugTbzxSgddjhsflTcWt0OVKdPSaa9SzSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/ACKurf8AXnL/AOgmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQBi6j4n0vTmMck/mSjrHENxH17CtYUZz2R24fL8RXV4qy7vQxW+IFuG+XT5iPUyAVssHLud6yOpbWaLNr4702YhZ45rcnuw3D9KUsHUW2pz1corwV42Z0ltdQ3cImt5UljPRkORXNKLi7M82cJQfLJWZPSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgTgUAeO63q82q6hLNIxKBiIkzwq9q9qhQUI2PsMJRhh6SjHfr6lK1vp7C6S5tpDHKhyCO/sfUV0uhGceWSFiFGpFxnqj2TTrsX2nW90BgTRq+PTIr56pHkm4dj5KpDkm49h06KZMmfZx0zUkMi8tf8An7/X/wCvT+RPzDy1/wCfv9f/AK9HyD5h5a/8/f6//Xo+QfMlhaOLOZw2fU0ikS+fF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4CWNjgOpP1osFySgYUAZHij/kVdW/685f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/APobV4GM/jSMzqK5wCgAoAKACgDz7xP4qkmkex0+QrCvyySqeXPcA+n867qGH+1I+jy7LIpKrWWvRf11OQJrtSPbbEzVpEuQ0mqSIbLumavd6Rcia1kwP40P3XHuKmpQjVVmcmJw9OvHlkj1XRtXg1mwW6g4P3XQ9Ub0rxatKVKXKz5evQlRnySNGszEKACgAoAoXE0izsquQB6U0iG9SL7RN/z0anZCuw+0S/32osguw+0S/wB9qLILsPtEv99qLILsPtEv99qLILsPtEv99qLILsPtEv8AfaiyC7LFpK7uwZiRjvSaKTLlIoKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgaAPHvEWi3GjX8gaNjbOxMUoHBHofQivocHWhVil1PoqGMVSC116lDTtOu9Xu1t7OJnYnlsfKg9Sa661WnQjzSZNbERgrtns1jaJY2MFqhysMaoD64FfKzk5zcn1PAnJyk5PqOmDb+N/TsgNSSyPD/wDTT/v2KZIYf/pp/wB+xQAYf/pp/wB+xQAYf/pp/wB+xSAMP/00/wC/YpgGH/6af9+xQAYf/pp/37FABh/+mn/fsUAGH/6af9+xQAYf/pp/37FABh/+mn/fsUAPSN2H3iv1QUhkiQkH5mDf8BAoHYeEUdAPyoGOoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAGB4u1I6doj+W2JZz5SHuM9T+VbYanzz16HdltBVq6vstTy3NeukfXNiZq0iGxpNUkQ5CZq0iGxpNUkQ5HReDNUax1xIGb9zdfu2Hbd/Cfz4/GuXH0eelzdUedmNJVKXN1R6rXhHgBQAUAFAEElrHI25s59jQKw37FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnRcLEkUCRElc5PrQ3cEiWgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAEZljV9hkUMexYZosAn/AC2H0NAupLQMKAGsqspDAEHqCKNgEjjSNdqKqj0AxQ23uF7j6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/APQ2rwMZ/GkZnUVzgFABQAUAcH8QpD5thH/Dh2/HgV6GAXxM93JVbnfocQTXpJHtOQ3NUkQ5CZq0iHIaTVJENiZq0iHIktpDFdwSLwVkUj8xSnG8GjKrrBo91FfKHzIUAFABQBWlu/KkKbM496EhNkf2/wD6Z/rT5Rcwfb/+mf60couYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/A
Fo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7eP+ef60co+YPt4/55/rRyhzB9vH/PP9aOUOYtRyebGHxjNJlD6ACgCjq9y9ppk0sf3wAAfTJxmnFXZMnZHCMxdizEljySeTXQc51fhu7kubdklYsYjtDHrjFYzVmbQdzeqDQKAOc1HXHWZorZgqqcF8ZJPtXmV8TNy5YaI9LD4JSipTKlt4inhlHnt5sWfmyOR9KqjXqJ+9qjoqZfCUfc0Z1cbrIiuhyrDIPtXonitNOzI5ZJlfCR7hjrQrCdxnnXH/PGiyFdh51x/wA8aLILsPOuP+eNFkF2SwvI+d6bcdKBoloGFABQAUAFABQAUAFABQAUAFABQBkeKP8AkVdW/wCvOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/8A6G1eBjP40jM6iucAoAKACgDiPiHbMbWzugOEdkb8Rkfyr0Mvl7zietlNS05Q7nAZr1kj23ITOKtIlsQmqSIchufWrSIbEziqSIci5pFq19rFnbIMl5lz9Acn9BWeIkqdKUn2MK9Tlg2e318meAFABQAUAV5Z4Ufa4yfpQkxNoZ9pt/7n/jtVZiug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdpWYXQ5J4HYKE5P+zSsx3RP5af3V/KgYeWn91fyoAcBgUAFABQBDc28d1bvBIMo4waE7O4mr6HLv4XuxLhJoimeGOQfyrX2iMvZs3dNsE06JYUO4nJZvU1nKVy4qxo0iwPSgDze7LwXEsUgw6MQQa89ULM+qo2nBSjsym8xJwOTXTCidKhY9I02J4NNtopPvrGob64rZK2h8jXkp1ZSjs2yWbdv4MmMfwkYpmLI8v6y/mtAgy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgBf3n/Tb81oAcqux5aVfrigB4jYEHzGPscUAS0FBQBkeKP+RV1b/rzl/wDQTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKOq6fHqmmz2cvCyLgH+6ex/A1dKo6c1NdDSjUdKamuh45e2k+n3clrcJtljOCPX3HtX0tOUakVKOzPpYVY1IqUdmVia1SByEzVpEOQhNUkQ2NzVpEtnoHgDQWjDavcLguu2AEdu7fj0FeFmuJUn7KPz/yPMxla/uI76vHOEKACgAoAryi33nzNu760K5LsMxaf7P5mnqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg9IbdxlVBHsaLsdkO+zQ/3B+dK7CyFW3iVgwQZFAWJaBhQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMzUdEs9Tw06ESAY8xDg//AF6Dow+Mq0NIPTsyCw8NafYTCZVeWUfdaU5x9BVOTNa+YV60eV6LyNqpOIjaKNzllBNFxWE+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsh6IqDCqAPagY6gAoAKACgDI8Uf8irq3/XnL/6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8AIjab9H/9DavAxn8aRmdRXOAUAFABQAUAYeveG7TXIR5n7q4Qfu5lHI9j6iunD4qdB6arsb4fEzovTbseb6n4Z1bS2JltWliHSWEblP5cj8a92hjaNXZ2fmetDF06nUxm4ODwfQ12qxo5E1rY3l9IEtbaWZv9hCf16Up1aVNXm7GU6kY7s7bw/wCAWDrc6xtwORbKc5/3j/QV4+LzVSXJR+//ACOGti76QO/VQqhVAAHAA7V4rdzhHUAFABQAUAV5LRZHLFiCaBWG/Yk/vtRzC5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlJoYVhUgEnPrQ3caViSgYUAFABQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMDWPFVjo8vkNvmuAMmOP+H6ntXZh8DVrrmWiPJx2b0MI+V6y7L9SvpnjXT7+4WCRHt5HOFLkFSfTParr5dVpR5t0Y4TPcPiJqDTi332+86euA9wjeeNG2s2DQK437TD/fH5UWYXQfaYf74/KizC6D7TD/AHx+VFmF0PSVJM7GzjrRYLj6BhQAUAFABQAUAFABQAUAFABQAUAZHij/AJFXVv8Arzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/AOhtXgYz+NIzOornAKACgAoAKACgAoAia3hkOXiRj6lQaalJdQuyRVCjAAA9AKQC0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8tR9DSF1JaYxDwDQJngt3eyT3c0szEyu7M+fXNfcUaUY00o7I/Nq/NUqSnLdsh+0e9a8hjyHt+hTy3OhWE83+seBCxPc4618RiYxhWnGO
ybP0bBzlOhCUt2kW5iN/MStx1JArE6WR5X/ngn/fQpkhlf8Angn/AH0KADK/88E/76FADlk2Z2xKM+jikMd9ob/nmP8AvsUDuH2hv+eY/wC+xQFw+0N/zzH/AH2KAuH2hv8AnmP++xQFw+0N/wA8x/32KAuH2hv+eY/77FAXD7Q3/PMf99igLh9ob/nmP++xQFwEznpED/wMUBccryFgDFgeu4UCJaCgoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQB//9k=", + }, + ], + }, + { + ftm_role: "tool", + ftm_call_id: "call_W1ae766eqQMvHBnmVvUoUtfw", + ftm_content: [ + { + m_type: "text", + m_content: + "opened a new tab: tab_id `6` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + }, + { + m_type: "image/jpeg", + m_content: + "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAMfAXEDAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtbaa9uora3TfNK21FzjJqZSUVeWwGmulaZAM3uuwbv+ednE05/76+Vf1rL2s5fDH7wuGPDK8FtZk/2gsK/pk0/3++n4i1HJpel6gzRaZfXP2nazJBdQBfMwCSA6sRnAOMjmpdSpDWa09R6mZYWkmo39tZwlRJcSLGhY4GSeM1tOShHmA1fEfhS/8MNbi9kt3+0BinksT93Gc5A9RWNDExrX5VsCdyxpPgnU9Z0VtVtpbVYF3/LI5DHb16DFTUxcKdTkaFzHNqrOMqrH6DNdLaW4wAJOACT6CnsAFSpwwIPoRihNPYByRyOGKRuwX7xVSQPr6Urq9gO703wRp154BfXHnuRdCCWUKrDZlScDGPb1rz54uca6h0J5jga9H1KNnw5oy6r4jstOvBNDFcMckDa2ApPGR7VhXq8lNyjuJs3td8HWGm+M9J0iCa4Nve7d7OQWXLEHBx7Vz0sVOVGVR7oL3RV8d+GLLwzd2UdlJO6zxszeawOCCBxgD1q8JiJ1k+boCdzlEjklJEaO5HUKpOPyrrbS3GNp7gFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBseGONcVu6wXDD6iF6xxHwfNfmDG6Rfabaafex3tibiaWMCFsA7flIxk/d5Ktkc/LjvRVp1JSTg7ICx/aWieTpCf2Uxa3YG7PA80Y5Gc/Nk884x0qPZ1bytL0FqT6fPZzeLTd2MHk2sNvLIV2heVhbLbQSFy3bJxmpkpRo2m7u6/MZQ8K8eKtHH/T1F/OtcQv3UvQHsew+L/B48VtaE3ptvs2/pFv3bse4x0rxsPiXRvZXuRF2JtK0H/hHPCdxpwuPtG1Jn3lNv3gT0yaU6vtaqk0F7s574RAHQL7p/x8j/ANAWujML88fQctzjfAYB+IFkMf8ALSX/ANAau3F/wH8hvY6nxjoy658SdKsGJWOS2BlK8HYrMT+PGK5MNV9nh5S8xJ6Grrni/S/BUsOk2mmb8IGaOIhFRT07ck4rKjhqmITnJgk3qX5b2w1H4e313psQitpbSZhHtxtbB3AgdDnNZqMo11GQupzXw/0bTtO8OS+JdQjV3Ad0Zl3eWi8Egf3iQf0roxlaU6nsojbvoaWiePdM8Sa5b2c2nNBMGLWssjBvmwf++SRn1FZ1cHUpU3JMHGyKni3/AJKj4Z+if+htWmH/AN2mJbDPiLpzav4p8P6erbTcB0Lf3RuXJ/LNGDn7OlOQ47HSzw3Phuxt7Tw3oC3K/wAZMyxgfUnlmNcqaqycqkidzC8c+H4NS8MvrRsRZalAgkkTgkjPzKxHDeoNdGEruFXkvdFJ6nkNez6FBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBPZ3k9hdx3Vu4WWM5UkAjkYIIPUEEjFTOKnFxYHSvZ2cukWGt3lnDDbrHJ5kdsnli5l8whEGOnAJJHQD3rk5pKbpQd9vkIyhrNqvTw/pX4rKf8A2etfYy6zYDZ9dmktpYILKws0mXZIba32My5ztLEk44H1qlQjdNybGO8
L/wDI16T/ANfcf/oVGJ/hS9BM7v4tXE8Emk+TNLHkS52OVz930rz8vipc10KJq+BpJJvh3M8jvI3+kfM7Env3NZ4pJYjTyFLcxPhNq1vCt3pUrqk0rLNECcb/AJcED34BrbMacnyzQ5G1pngjTvDXiJdYl1FvLMpS2hdQuHfgDP8AF1wOKwnip1afJYVzO8XaumhfEvSb+UHyUtQsuByEZmBP4dfwrTDUnUw8ore41saHiTwTbeL7qHV7DUkj8yNVZgnmI4HQjBGD2rOhi5UIuDQJ2NB7Cy0v4eX9jYTieGC1mRpAQdz4O7OO+c8VmpynXUpCW5z/AMP9SsdY8LTeGbyQJKFdFXOC8bc5X3BJ/SujGU5QqqrHYclqWdC+H1r4d1u3v73VFmKvttYynl7nIOM88nGeBU1sZOrBxSBy0IvFv/JUPDX0T/0NqrD/AO7TEthnxD1I6R4r8PagF3fZw7lfUblBH5E0YODqUpw7jWxv6gl/4ktLa/8ADPiAW0ZXDrsDK314yrDpisIONJtVY3Fscl45TV9H0aCC58TPdvcZSe3ZFXcvqoAzt7HNdWE9nUqO0LDR5vXqFhQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHmWRoliMjmNSSqFjtBPUgUuVXcrasBlMAoAVWZGDKSGByCDgii1wHyzzT486aSTHTe5bH51MYxWysA5Lm4jj8uOeVEP8KyED8hTcYt3cQIgSpBUkEcgg4xT9QJp726uSpnup5Sn3TJKzbfpk8UlCC2QaEckskz75ZHkbGMuxJ/M0JJbKwEkN5dWyMkF1PEj/eWORlB+oBpShGTu4oBqzzJEYlmkWM9UDkKfw6UcqvdpARglSGUkEHIIOCKq1+gE817d3DI011PIyfcLysxX6ZPFSqcVeyDQY1xM8gkeaVpF6MzkkfQ0KEUrJaBYSWaWcgzSySEDALsWx+dCjGOyAdBdXFqxa3uJYSepjcrn8jRKEZboBkssk0hklkeSRurOxYn8TTSSVkAymAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQMKBBQAUAFABQAUAFABQAUAVZtSs7eQpLcxq46jOcflWMsRTi7XM5Vopkf9s6f/z9J+R/wqfrVIn6xEP7Z07/AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z07/AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB7eJJBqNncybIrhGb+70P61UK9OTsmVGrFvctVsahQIKACgAoAKACgAoAiunMdrM68MsbEfUCs6r5YOxFR2jc4Iknknk8k141+p5rYlIRreGdAn8T6/baRbzRwyT7j5kgJVQoJPT6UnoXGNz0T/hRGp/9B2y/wC/L1POaeyYf8KI1P8A6Dtl/wB+Xo5w9kw/4URqf/Qdsv8Avy9HOHsmH/CiNT/6Dtl/35ejnD2TD/hRGp/9B2y/78vRzh7Jh/wojU/+g7Zf9+Xo5w9kw/4URqf/AEHbL/vy9HOP2RheLfhbf+E9DbVZtStbmJZVjZI0ZWG7gHmnzXIlTaRwVUZBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBuab4bOpWSXI1XT4NxI8uYybhg452oR+tS2Wo3K+r6KdJWEm/tLrzCRi3L/Lj13KKaYONjLpkChipDKSGHII7GhNp3Q07HfwuZII3PVlBP4ivcg7xTPTg7xQ+qKCgAoAKACgAoAKAIL7/jxuP+uTfyNZV/gfoZ1fgZwdeMeaFAG14S8QHwt4ltdXFsLjyNwMW/buDKV64OOtJ7Fxdj0/8A4X1F/wBC5J/4GD/4ip5DT2q7B/wvqL/oXJP/AAMH/wARRyB7Vdg/4X1F/wBC5J/4GD/4ijkD2q7B/wAL6i/6FyT/AMDB/wDEUcge1XYP+F9Rf9C5J/4GD/4ijkD2q7B/wvqL/oXJP/Awf/EUcge1XYP+F9Rf9C5J/wCBg/8AiKOQPao53xp8VR4t8PNpKaObUPKkjSNcb/unOANopqNhSqXVjziqMQoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoGdLo/i59J02OzFvdOELHdHqc8I5OfuIcCpsaKSSKuv8AiJtdWBWhnj8ok/vb6W4zn0Dk4/ChIUpJmJVEAelAHfWv/HpD/wBc1/kK9un8CPSp/CiWrLCgAoAKACgAoAKALmlWMOp6vZ2FyGMFzMsMgVsHaxwcHtWOI/hy9CZq6sel/wDCjfBv/PK//wDAs/4V4HOzD2
UQ/wCFG+Dv+eV//wCBZ/wo52Hsoh/wo3wd/wA8r/8A8Cz/AIUc7D2UQ/4Ub4O/55X/AP4Fn/CjnYeyiH/CjfB3/PK//wDAs/4Uc7D2UQ/4Ub4O/wCeV/8A+BZ/wo52Hsoh/wAKN8Hf88r/AP8AAs/4Uc7D2UQ/4Ub4O/55X/8A4Fn/AAo52Hsoh/wo3wd/zyv/APwLP+FHOw9lEP8AhRvg7/nlf/8AgWf8KOdh7KIf8KN8Hf8APK//APAs/wCFHOw9lEP+FG+Dv+eV/wD+BZ/wo52Hsoh/wo3wd/zyv/8AwLP+FHOw9lEP+FG+Dv8Anlf/APgWf8KOdh7KIf8ACjfB3/PK/wD/AALP+FHOw9lEP+FG+Dv+eV//AOBZ/wAKOdh7KIf8KN8Hf88r/wD8Cz/hRzsPZRD/AIUb4O/55X//AIFn/CjnYeyiH/CjfB3/ADyv/wDwLP8AhRzsPZRD/hRvg7/nlf8A/gWf8KOdh7KIf8KN8Hf88r//AMCz/hRzsPZRD/hRvg7/AJ5X/wD4Fn/CjnYeyiH/AAo3wd/zyv8A/wACz/hRzsPZRD/hRvg7/nlf/wDgWf8ACjnYeyiH/CjfB3/PK/8A/As/4Uc7D2UQ/wCFG+Dv+eV//wCBZ/wo52Hsoh/wo3wd/wA8r/8A8Cz/AIUc7D2UQ/4Ub4O/55X/AP4Fn/CjnYeyiH/CjfB3/PK//wDAs/4Uc7D2UQ/4Ub4O/wCeV/8A+BZ/wo52Hsoh/wAKN8Hf88r/AP8AAs/4Uc7D2UQ/4Ub4O/55X/8A4Fn/AAo52Hsoh/wo3wd/zyv/APwLP+FHOw9lEP8AhRvg7/nlf/8AgWf8KOdh7KIf8KN8Hf8APK//APAs/wCFHOw9lEP+FG+Dv+eV/wD+BZ/wo52Hsoh/wo3wd/zyv/8AwLP+FHOw9lET/hRvg3/nlf8A/gWf8KOdh7KJ5he20dnf3NrDkRQSvEmTk7VYgZP0FfQ0nemvQ6IqysQVoMKACgAoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAh60gPmvV/+Q3qH/X1L/6Ga+ko/wAOPoWtinWgwoAKACgAoAKACgDV8M/8jVpP/X3F/wChCscR/Cn6ClsfRlfPEBQAUAGaAEzQAZoAWgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBD1pAfNer/8AIb1D/r6l/wDQzX0lH+HH0LWxTrQYUAFABQAUAFABQBq+Gf8AkatJ/wCvuL/0IVjiP4U/QUtj6Mr54gKACgDK1DV47RjFGA8o6+i/WuLEYtU3yxV2dVDCyqavYyX129zkOoHoFFcf1ys2d0cDSsWbTxH84W7UBT/Gvb6iuqji29Joxq5fZXpnQq6uoZTkHkEd67k7nmvR2FpgI7BFLNwBQBD9rh/vfpRYV0H2uH+9+lFgug+1w/3v0osF0H2uH+9+lOwXRKkiyDKnIpBcdQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPWkB816v/AMhvUP8Ar6l/9DNfSUf4cfQtbFOtBhQAUAFABQAUAFAGr4Z/5GrSf+vuL/0IVjiP4U/QUtj6Mr54gKAKt/cfZbGWYdVXj69BWVaXLBs0ow56iicS8pJJJyTyTXi8vM7s+hjBJWRC0lbRgaKJG0laxgWonU+Fr1praS2Y5MJBX/dNd1Hax4uZUVCamup0NbHmjZEEiFT0NAFf7FH6t+dPmZPKg+xR+rfnRzMOVB9ij9W/OjmYcqD7FH6t+dF2HKiaKNYl2qeM55pFD80AGaADNABmgAzQAZoAM0AGaADNABmgBc0AFAEVzcw2kLTTuEQd6EribsRWeo219GzwSZC/eBGCKbTW4JpkVvrFjdXPkRTZftwQG+hpuLSuLmV7C3OsWVpceRLNh++ATt+tJRbBySJLvUbayjWSeTAb7uBnP0oSbG5JCpqFrJZm6WUeSBkse1FnewXVrjLPVLS/LLBJll5KkEHHrQ01uCkmXKQwoAKACgBD1pAfNer/APIb1D/r6l/9DNfSUf4cfQtbFOtBhQAUAFABQAUAFAGr4Z/5GrSf+vuL/wBCFY4j+FP0FLY+jK+eICgDO1qJpNJuAvLBd35HNZVYuUGjowkuWtG5wjSVxRgfSqJGZK1jAtRI2kraMC1E6fwZGzG6n/gO1B9eT/hWqjY8XN5K8YnW1R4wyXb5Tbs7cc460Ayni3/uy09SdAxb/wB2WjUNAxb/AN2WjUNAxb/3ZaNQ0DFv/dlo1DQMW/8Adlo1DQMW/wDdlo1DQMW/92WjUNAxb/3ZaNQ0DFv/AHZaNQ0DFv8A3ZaNQ0DFv/dlo1DQTFv6S0ai0DFv/dlo1HoSx28MoyocD3OKAsiWO3SNty5z7mkOxNQMoavp7alZeSjhXDBlJ6Z96cXZkyVylpWivYxT+fIC0y7MIeg/xqpSuxRjZFWw8PS22oJLLMhjjbcu3OW9PpTc7qxKiri6j4flur95opkCSHLbs5U/1ojOyFKKbLGq6M15b26wSAPAuz5+44/wpRnZjkk0LDouzRZbJph5kjbywHAPGP5UOXvXGkrWGaNo0lhctPPIpbbtVUz+ZpzncUUkbu8VmaXQbxQF0G8UBdBvFAXQbhmgLo+bdX/5Deof9fUv/oZr6Oj/AA4+haasUsVoVdBQAUAFABQAUAFAGr4Z/wCRq0n/AK+4v/QhWOI/hT9BS2PoyvniAoAQjIII4oA4fW9Ans5XmtY2kt2OcLyU9vpWfs1c+gwWOhNKNR2aOeaTHB4PvWkaZ6ys1dFrT9KvdUlCwRMEz80rDCj8e9aWSMMRi6NCOr17HounWEem2UdtEPlTqe5Pc1mfK16sq1Rzl1LdBkNcMUO0gN2JoAh2XX/PVPyp6C1DZdf89U/KjQNQ2XX/AD1T8qNA1DZdf89U/KjQNQ2XX/PVPyo0DUNl1/z1T8qNA1DZdf8APVPyo0DUNl1/z1T8qNA1DZdf89U/KjQNQ2XX/PVPyo0DUNl1/wA9U/KjQNQ2XX/PVPyo0DUNl1/z0T8qNA1Jx05pDFoAKACgAoA80+NHifUPD3ha3j02ZoJ72fymmQ4ZECknB7E8DP1q4JN6mNaVlofOtvc6rf3kVvBc3k1xO4REEzFnYnAHWtbI5k2zpf8AhA/iD/0DNS/8CR/8XS0K5Zh/wgfxB/6Bmpf+BI/+Lo0DlmH/AAgfxB/6Bmpf+BI/+Lo0DlmI/gX4gIjM2m6nhQScXAP/ALNRoFpHK/2hff8AP7c/9/m/xp2RF2J/aF9/z+3P/f5v8aLIOZh/aF9/z+3P/f5v8aLIOZh/aF9/z+3P/f5v8aLIOZj4rzUZpUijurt5HYKqrM2ST0A5osg5mXn0LxIoZ30/UQACWJVvxNPnb6j94y1urhSGWeUHsQ5qlJ9xczR2Ok3L3enRy
ycvypPrg9a9XDzc4XZ30Zc0dS7W5qFABQAUAFAGr4Z/5GrSf+vuL/0IVjiP4U/QUtj6Mr54gKACgBCKBEbW0LtuaGNm9SoJouWpySsmPCgYAAwKCR1ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeNftB/wDIC0b/AK+3/wDQK0gc9bY8R0HUV0fxDp2pSRNIlrcxzMinBYKc4FaNaHOnZntP/C89A/6BepflH/8AFVHIb+2Qv/C89A/6Bepf+Q//AIqjkF7VB/wvPQP+gXqX/kP/AOKo5A9qhknxy0IxOF0rUixUgA+WBnH1p8oe1Vjwgkkk46nNUjBiUxBQAUATWsqwXkEroWRJFZlwDkA9MMCPzBFA07M62bxZpUkMiLp0wLKQM2tmOo9ov5VHKauascZzxVGR2Hh//kER/wC83869XB/wzuw/wmma6jcKACgAoAKANXwz/wAjVpP/AF9xf+hCscR/Cn6ClsfRlfPEBQAUAVbzUbTT4vNu50iTsWPX6DvWlOlOo7QVzCviaVCPNUlZGKfHGjb9u6cjP3vK4rs/szEWvY8v/WDBXtd/cbNlqdnqMXmWk6SqOu08j6jqK46lKdN2mrHp4fFUsRHmpSui1mszoFoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAMbXvDOj+Jo4oNYsUu44WLxq5I2t0zwR2pp2JlFPcxP8AhU/gj/oX7f8A7+P/APFU+Zk+yiH/AAqfwR/0L9v/AN/H/wDiqOZh7KIf8Kn8Ef8AQv2//fx//iqOZh7KIf8ACp/BH/Qv2/8A38f/AOKo5mHsoh/wqfwR/wBC/b/9/H/+Ko5mHsoh/wAKn8Ef9C/b/wDfx/8A4qjmYeyiH/Cp/BH/AEL9v/38f/4qjmYeyiH/AAqfwR/0L9v/AN/H/wDiqOZh7KIf8Kn8Ef8AQv2//fx//iqOZh7KIf8ACp/BH/Qv2/8A38f/AOKo5mHsoh/wqfwR/wBC/b/9/H/+Ko5mHsoh/wAKn8Ef9C/b/wDfx/8A4qjmYeyieaeNtF07w/4jaw0u1W2tVhRxGpJGTnJ5NezgdaRrTjbY5012FhQAUAFABQBq+Gf+Rq0n/r7i/wDQhWOI/hT9BS2PoyvniAoArX15HY2U91L/AKuJC5/CqpwdSaguplXrKlTlUeyVzxzU9XuNVvXurhyWP3V7IPQV9fh8NGhBQivU/OcZiamKqOpN+hT82t+U5OUs2Gp3Gm3cdzbOVkQ9OzD0PtWNfDwrQcZo6cLXnhqiqU3qex6XfJqWm295H92Vd2PQ9x+dfI1qTpVHTfQ/RsNXVelGquqLlZm5DcRtJHtU4OfWgTKv2Sb1H/fVO6Jsw+yTeo/76p3QWYfZJvUf99UXQWYfZJvUf99UXQWZchUpEqt1FSUiTNAwzQAZoAM0AGaADNABmgAzQAZoAM0AFABQAhOKAGjmT8KAH0AJmgBaACgAoAKACgAoAKACgAoAKAPEPid/yOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAc7413f8IlfbOwUt9NwzXbljX1qFzzM3TeDml/Wp475lfZcp8Jyh5tLlDlDzaOUOU9c8A7z4UgLZwZJCv03f/rr5LNbfWpW8j7jJVJYSN/M6ivOPWGS7tnyMFPqaAIP9I/56xU9Bah/pH/PWKjQNQ/0j/nrFRoGof6R/z1io0DUP9I/56xUaBqH+kf8APWKjQNQ/0j/nrFRoGof6R/z1io0DUP8ASP8AnrFRoGof6R/z1io0DUP9I/56xUaBqH+kf89YqNA1D/SP+esVGgah/pH/AD1io0DUXFyekiflRoGpJGJgT5jKR2wKQaktAzD8TR3MlpF5Idowx8wJ+n4VcLX1M6l7aC+G47mO0cThgpb92G6gd/wzRO19Ap3tqa88qwQvK33UUk1lJ2VzWMXJqKOZPimRJwXiTys8gdQPrXHDEVJS20PV/s1cu+p0rSHyw6YOcYycV3HkvQZ50n92P/vugVw86T0j/wC+6AuS+Yn94fnQFw81P7w/OgLh5qf3h+dAXDzE/vD86AuHmp/eH50BcVXVjgMCaBjqAPEPid/yOkv/AF7x/wBa9vAfwi4nG12DCgAoAKACgDV8M/8AI1aT/wBfcX/oQrHEfwp+gpbH0ZXzxAUAQXVrHeWstvMu6KVCjj1BFVCThJSW6M6lNVIOEtmeG+INDvPD1+0FwrGEk+TNj5ZB/j6ivtcFi6eJgmn73VHxOMwM8PNprTozI8yu2yOPlNPRNHvdev1tbRDjI8yXHyxj1J/p3rlxWKp4aHNN+iOrC4KeJmowPc7Cyi06xgtIBiKFAi/h3r4epOVSbnLdn3FKlGlBQjsi1UmhFPgxnchcZ6CgGVcR/wDPtJTJDEf/AD7SUAGI/wDn2koAMR/8+0lABiP/AJ9pKADEf/PtJQAYj/59pKADEf8Az7SUAGI/+faSgAxH/wA+0lABiP8A59pKADEf/PtJQABYyQPs8lAWLH2SL+7+ppXY7IlRBGoVRgCgLDqBhQAUAN/5afhQA2aNZY2jcZVgQR7Un5jTcXdHNL4QQXoeS7ZrcHOzbgn2JpRUYo9V5rJ0+VR17nSlAybRwBVHkPUb9nH96gVhPIH96gdhfs/+1QFg+z/7VAWD7P8A7VAWD7P/ALVAWHCFR15oCxIBQMKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQAUAQXNpDdwtDcRRyxN1SRQwP4GnGUoO8XZkSpxmrSV0YZ8CeGzJv/suLPoGbH5ZxXaszxaVlNnI8twzd+U27Wyt7GBYLWCOGJeiRqFH6VxznKb5pu7OuFOMFaKsixUlhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAN/wCWn4UAVtTujY6bc3YTeYYmkC+uBnFXTh7ScYd2Y16jp05VF0R5XF441aO8E7XRdQcmIgbCPTHavppZXRcGktT4qnmuNVVTctG9uh6wHLQK4O3cAemcV8u9HY+4i7xTQzzH/wCev/jg/wAaQw8x/wDnr/46P8aAuS+evvQO4eenvQFw89PegLh56e9AXDz096AuPV9x+6w+ooGOoA8Q+J3/ACOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf+hCscR/Cn6ClsfRlfPEBQAUAFACZoAM0ALQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADf+Wn4UADqrqVYAqRgg9xRdp3E0mrM5eDwFoEGoi7WGQlWDrC0hKKfp/QnFehLNMVKn7NvTv1POjlWGjP2qj/kdOVDDB6V556Inkp6H86BWDyU9P1oHYPJT0P50BYPJT0P50BYPJT0P50BYPJT0P50BYcsaqMYoGOoAKAPEPid/yOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp
+gpbH0ZXzxAUAFAHO654ttdJcwRr590OqA4CfU/0relQlPXoelg8tqYj3npHucy3jzU9+fJtdv8Ad2n+ea61go9z03k1C1uZnQ6H4xtdTlW2nT7PctwoLZVz6A+vtXPWwk6eq1R5eLy6dDWOqOmBzXKecLQA2SRY13NwKAIvtcPqfyp2FdB9rh9T+VFgug+1w+p/KiwXQfa4fU/lRYLolRw6hl6GkMdQAUAFABQAUAFABQAUAFABQAUAFABQA3/lp+FAFbUpJodNuZIF3TJExQD1xxVQV5pPYumk5pS2PHotTvUvkuIp5TclwQdxJY56e+fSvoVhIcjutLH09f2PI42VrHsrEmAFgVY4yAcYNfOWPlH5EWP9p/8Avs/4UxAOO7/99n/CgLkvnn+6PzP+FIdxfPP90fn/APWoC4eef7o/P/61AXE88/3R+f8A9agLiiZj0j/U/wCFAXJFLE8qAPrmgY6gDxD4nf8AI6S/9e8f9a9vAfwi4nG12DCgAoAKACgDV8M/8jVpP/X3F/6EKxxH8KfoKWx9GV88QFAGfrd+dN0e6u1+/Gny/wC8eB+pq6Ueeaib4Wl7WtGD6nj8kjO7O7FmYkknqTXuRhZWPstIpRWyIy1aqJm5Dd5UggkEcgjtVqC2MpNNWZ6/4b1FtT0K2uZOZCCrn1YHBr5/E0vZVXE+VxNP2dVxRr1gYjJY1lTa3SgCD7HD6t+dF2KyD7HD6t+dO7FZB9jh9W/Oi7CyD7HD6n86LsLInRVjQKp4HvS1HoOyPagYZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAoOaACgCte30FhEJJ3wCcAAZJNNJsTdhLO9gvl82Bty9D2IPuKGmgTuWTUsZkxWGipqRmjgtBeZ+8AN2f8ar63KS9nzfI2ftuTXY1SBj5sY96RiJiP8A2P0oANsf+z+lAC7E/uj8qADYv90flSANi/3R+VABsX+6PyoAUADoMUwFoAKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQBi+KbZ7rw5eRxglwgcAd9pz/StsNJRqpnTgqns8RGTPIy3vX0KgfUuQ0tWqiYuQwtWig3sZOZ614KtXtvDFt5gIaUtLg+hPH6Yr5vHzUsRJo8DFT56rZ0NcZzkVxs8v5wxGf4aBMqf6P/zzlqrMm6D/AEf/AJ5y0WYXQf6P/wA85aLMLoP9H/55y0WYXQf6P/zzloswug/0f/nnLRZhdB/o/wDzzloswug/0f8A55y0WYXQf6P/AM85aLMLoP8AR/8AnnLRZhdB/o//ADzloswug/0f/nnLRZhdB/o//POWlqGgf6P/AM85aBkyW0MihgrDPqaAsSxwJESVzk+9IaRLQMy9a0ttShj8twkkZJG7oQetVGXKTKNw0bTDpsTo7hpHO5iOg9qJS5hRjYu3ayNaSiL/AFhQhfris5q8WkawaUlzbHnge6e7WCOOT7RuwFwcg1zUsLy69T6d+yVNybVrHobg+SA4DHjORnmutHyr8iHav/PNP++KZIbV/wCeaf8AfFAEnmv7f980D1DzX9v++aADzX9v++aADzX9v++aQDlaVhkY/KgZKoYHlgfwoGOoA8Q+J3/I6S/9e8f9a9vAfwi4nG12DCgAoAKACgDV8M/8jVpP/X3F/wChCscR/Cn6ClsfRlfPEBQAhGQc0vMDznxF4KuYp3udKTzYWJYwD7yfT1Fe1hMfCyjV+89XD49W5ZnKNpuoCTYbG639MeS3+Feoq1G1+dHU68N7nSeH/A13dXCT6rGYLZTnyj9+T2PoP1rixeZwjHlo6vucVbFq1oHpiIEUKoAAGAAOgr5/Xqea9XcdQAyQOVwjBW9SKAIdlz/z1X8qNCdQ2XP/AD1X8qegahsuf+eq/lRoGobLn/nqv5UaBqGy5/56r+VGgahsuf8Anqv5UaBqGy5/56r+VGgahsuf+eq/lRoGobLn/nqv5UaBqGy5/wCeq/lRoGobLn/nqv5UaBqGy5/56r+VGgaihLjIzKuPpSGrligYUAFABQAUAN/5afhQAOwRSWIAAySe1HkhNpLUwIvF+izXogWchmO0SFMKT9a7HgMQoc7Wh5cM6wk6nslL/I3mdUXLHArjR6lxn2mL+8PyoC4faYv736GgLk2aBhQAUAFABmgAoAKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQAUAJigAxQAYpWAWmAUAFABikAYoAMUAGKADFABigAxQAYoAMUAGKADFABigApgFABQAUAFABQA3/lp+FAFbU7Vr3Trm1V9jTRMgb0JGM1dOfs5xm+jMa9P2lOVNdUeQweFPEE2pCzewljG7DTn/AFYHqD3r6ueY4VUnNS17Hx0MnxHtFG1tdz2MIywKikkqAM+tfI3u7n2iVko9hm2b/a/z+NAw2zf7X5//AF6ADbL/ALX5/wD16Yahtl/2vz/+vQGobZf9r8//AK9Aahtl/wBr8/8A69AajhHIRy5HtSHYlVNv8TH6mgY6gDxD4nf8jpL/ANe8f9a9vAfwi4nG12DCgAoAKACgDV8M/wDI1aT/ANfcX/oQrHEfwp+gpbH0ZXzxAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADf+Wn4UAMuJkt4XmkbbHGpZj6AUJXaS3GouTUVuzkI/iBateBJLR0tyceaXyQPUj/69dv1CfLdbnqzympGF+bXsdh5nybgCwPTbXDbU8jbQb5x/54v+VOwrh5x/54v+VA7kuaAF4oGHFABxQAZoAKACgDxD4nf8jpL/ANe8f9a9vAfwi4nG12DCgAoAKACgDV8NceKdJ/6+4v8A0IVjiP4UhPY+jK+eICgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAb/AMtPwoAivLZLy0mtpM7JUKHHoRTjLlakVCThJSXQ88j+H2oNfBJriD7KDzIpO5h7DHBr2P7SpqndL3j1KmZRlHRanopiAiCLgAAAfSvG1e55L1I/Ib/Z/L/61BNg8hv9n8v/AK1AWDyG9vy/+tQFg8hvb8v/AK1MLB5De35f/WoCweQ3t+X/ANakFhy24x8x59gP8KB2JVjVegANAx1AHiHxO/5HSX/r3j/rXt4D+EXE42uwYUAFABQAUAPileGZJYmKyRsHVh2IOQaTV00wZ6vpvxZsDaINSs7hLkDDGABlY+oyQR9K8meXz5vd2I5WXf8Aha+gf88L/wD79L/8VU/2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1fILB/wtfQP+eF
[... remainder of base64-encoded screenshot image data omitted ...]",
+      },
+    ],
+  },
+  {
+    ftm_role: "assistant",
+    ftm_content:
+      "Here are the screenshots of the `index.html` page for both desktop and mobile views, captured in parallel tabs.\n\nIf you need any further modifications or adjustments, please let me know!",
+  },
+].map((message, index) => {
+  return {
+    ftm_belongs_to_ft_id: "test",
+    ftm_num: index,
+    ftm_alt: 100,
+    ftm_prev_alt: 100,
+    ftm_created_ts: Date.now(),
+    ftm_call_id: "",
+    ...message,
+  };
+});
diff --git a/refact-agent/gui/src/components/Chat/Chat.stories.tsx b/refact-agent/gui/src/components/Chat/Chat.stories.tsx
index f35dec619..88f55a299 100644
--- a/refact-agent/gui/src/components/Chat/Chat.stories.tsx
+++ b/refact-agent/gui/src/components/Chat/Chat.stories.tsx
@@ -1,7 +1,7 @@
 import React from "react";
 import type { Meta, StoryObj } from "@storybook/react";
 import { Chat } from "./Chat";
-import { ChatThread } from "../../features/Chat/Thread/types"; +// import { ChatThread } from "../../features/Chat/Thread/types"; import { RootState, setUpStore } from "../../app/store"; import { Provider } from "react-redux"; import { Theme } from "../Theme"; @@ -21,26 +21,31 @@ import { import { TourProvider } from "../../features/Tour"; import { Flex } from "@radix-ui/themes"; import { http, HttpResponse } from "msw"; +import { BaseMessage } from "../../services/refact/types"; const Template: React.FC<{ - thread?: ChatThread; + messages: BaseMessage[]; config?: RootState["config"]; -}> = ({ config }) => { +}> = ({ config, messages }) => { const store = setUpStore({ tour: { type: "finished", }, - // chat: { - // streaming: false, - // prevent_send: false, - // waiting_for_response: false, - // max_new_tokens: 4096, - // tool_use: "agent", - // send_immediately: false, - // error: null, - // cache: {}, - // thread: threadData, - // }, + threadMessages: { + waitingBranches: [], + streamingBranches: [], + ft_id: null, + endNumber: 0, + endAlt: 0, + endPrevAlt: 0, + thread: null, + messages: messages.reduce((acc, message) => { + return { + ...acc, + [message.ftm_call_id]: message, + }; + }, {}), + }, config, }); @@ -81,7 +86,7 @@ export const Primary: Story = {}; export const Configuration: Story = { args: { - thread: CHAT_CONFIG_THREAD.thread, + messages: CHAT_CONFIG_THREAD, }, }; @@ -130,28 +135,31 @@ export const Knowledge: Story = { export const EmptySpaceAtBottom: Story = { args: { - thread: { - id: "test", - model: "gpt-4o", // or any model from STUB CAPS REQUEst - messages: [ - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - // { ftm_role: "assistant", ftm_content: "👋" }, - ], - new_chat_suggested: { - wasSuggested: false, + messages: [ + { + ftm_role: "user", + ftm_content: "Hello", }, - }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + // { ftm_role: "assistant", ftm_content: "👋" }, + ].map((message, index) => { + return { + ftm_belongs_to_ft_id: "test", + ftm_num: index, + ftm_alt: 100, + ftm_prev_alt: 100, + ftm_created_ts: Date.now(), + ftm_call_id: "", + ...message, + }; + }), }, parameters: { @@ -170,67 +178,70 @@ export const EmptySpaceAtBottom: Story = { export const UserMessageEmptySpaceAtBottom: Story = { args: { - thread: { - id: "test", - model: "gpt-4o", // or any model from STUB CAPS REQUEst - messages: [ - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - ], - new_chat_suggested: { - wasSuggested: false, + messages: [ + { + ftm_role: "user", + ftm_content: "Hello", }, - }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + 
ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + { + ftm_role: "user", + ftm_content: "Hello", + }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + { + ftm_role: "user", + ftm_content: "Hello", + }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + { + ftm_role: "user", + ftm_content: "Hello", + }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + ].map((message, index) => { + return { + ftm_belongs_to_ft_id: "test", + ftm_num: index, + ftm_alt: 100, + ftm_prev_alt: 100, + ftm_created_ts: Date.now(), + ftm_call_id: "", + ...message, + }; + }), }, parameters: { @@ -249,69 +260,72 @@ export const UserMessageEmptySpaceAtBottom: Story = { export const CompressButton: Story = { args: { - thread: { - id: "test", - model: "gpt-4o", // or any model from STUB CAPS REQUEst - messages: [ - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - { - ftm_role: "user", - ftm_content: "Hello", - }, - { - ftm_role: "assistant", - ftm_content: "Hi", - }, - { - ftm_role: "user", - ftm_content: "👋", - // change this to see different button colours - compression_strength: "low", - }, - { ftm_role: "assistant", ftm_content: "👋" }, - ], - new_chat_suggested: { - wasSuggested: false, + messages: [ + { + ftm_role: "user", + ftm_content: "Hello", }, - }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + { + ftm_role: "user", + ftm_content: "Hello", + }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + { + ftm_role: "user", + ftm_content: "Hello", + }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + { + ftm_role: "user", + ftm_content: "Hello", + }, + { + ftm_role: "assistant", + ftm_content: "Hi", + }, + { + ftm_role: "user", + ftm_content: "👋", + // change this to see different button colours + compression_strength: "low", + }, + { ftm_role: "assistant", ftm_content: "👋" }, + ].map((message, index) => { + return { + ftm_belongs_to_ft_id: "test", + ftm_num: index, + ftm_alt: 100, + ftm_prev_alt: 100, + ftm_created_ts: Date.now(), + ftm_call_id: "", + ...message, + }; + }), }, parameters: { diff --git a/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx b/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx index 98b98c3a9..c09acc812 100644 --- a/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx +++ b/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx @@ -5,15 +5,14 @@ 
import { Provider } from "react-redux"; import { RootState, setUpStore } from "../../app/store"; import { Theme } from "../Theme"; import { MarkdownMessage } from "../../__fixtures__/markdown"; -import type { ChatMessages } from "../../services/refact"; -import type { ChatThread } from "../../features/Chat/Thread"; +import type { BaseMessage } from "../../services/refact"; // TODO: update fixtures import { - // CHAT_FUNCTIONS_MESSAGES, - // CHAT_WITH_DIFF_ACTIONS, - // CHAT_WITH_DIFFS, - // FROG_CHAT, - // LARGE_DIFF, + CHAT_FUNCTIONS_MESSAGES, + CHAT_WITH_DIFF_ACTIONS, + CHAT_WITH_DIFFS, + FROG_CHAT, + LARGE_DIFF, CHAT_WITH_MULTI_MODAL, CHAT_CONFIG_THREAD, STUB_LINKS_FOR_CHAT_RESPONSE, @@ -794,11 +793,26 @@ const TEXT_DOC_UPDATE = { }; const MockedStore: React.FC<{ - messages?: ChatMessages; - thread?: ChatThread; + messages?: BaseMessage[]; messageThread?: RootState["threadMessages"]; -}> = () => { - const store = setUpStore({}); +}> = ({ messages, messageThread }) => { + const store = setUpStore({ + threadMessages: { + waitingBranches: [], + streamingBranches: [], + ft_id: null, + endNumber: 0, + endAlt: 0, + endPrevAlt: 0, + thread: null, + messages: messages + ? messages.reduce((acc, cur) => { + return { ...acc, [cur.ftm_call_id]: cur }; + }, {}) + : {}, + ...(messageThread ? messageThread : {}), + }, + }); return ( @@ -826,34 +840,31 @@ export const Primary: Story = {}; export const WithFunctions: Story = { args: { ...meta.args, - // messages: CHAT_FUNCTIONS_MESSAGES, - messages: [], + messages: CHAT_FUNCTIONS_MESSAGES, }, }; export const Notes: Story = { args: { - messages: [], // FROG_CHAT.messages, + messages: FROG_CHAT, }, }; export const WithDiffs: Story = { args: { - messages: [], // CHAT_WITH_DIFFS, + messages: CHAT_WITH_DIFFS, }, }; export const WithDiffActions: Story = { args: { - messages: [], // CHAT_WITH_DIFF_ACTIONS.messages, - // getDiffByIndex: (key: string) => CHAT_WITH_DIFF_ACTIONS.applied_diffs[key], + messages: CHAT_WITH_DIFF_ACTIONS, }, }; export const LargeDiff: Story = { args: { - messages: [], // LARGE_DIFF.messages, - // getDiffByIndex: (key: string) => LARGE_DIFF.applied_diffs[key], + messages: LARGE_DIFF, }, }; @@ -866,7 +877,18 @@ export const Empty: Story = { export const AssistantMarkdown: Story = { args: { ...meta.args, - messages: [{ ftm_role: "assistant", ftm_content: MarkdownMessage }], + messages: [ + { + ftm_role: "assistant", + ftm_content: MarkdownMessage, + ftm_belongs_to_ft_id: "", + ftm_alt: 0, + ftm_num: 1, + ftm_prev_alt: 0, + ftm_call_id: "", + ftm_created_ts: 0, + }, + ], }, }; @@ -878,13 +900,13 @@ export const ToolImages: Story = { export const MultiModal: Story = { args: { - messages: CHAT_WITH_MULTI_MODAL.messages, + messages: CHAT_WITH_MULTI_MODAL, }, }; export const IntegrationChat: Story = { args: { - thread: CHAT_CONFIG_THREAD.thread, + messages: CHAT_CONFIG_THREAD, }, parameters: { msw: { @@ -899,7 +921,7 @@ export const IntegrationChat: Story = { export const TextDoc: Story = { args: { - thread: CHAT_WITH_TEXTDOC, + messages: CHAT_WITH_TEXTDOC, }, parameters: { msw: { @@ -919,7 +941,7 @@ export const TextDoc: Story = { export const MarkdownIssue: Story = { args: { - thread: MARKDOWN_ISSUE, + messages: MARKDOWN_ISSUE, }, parameters: { msw: { @@ -939,40 +961,43 @@ export const MarkdownIssue: Story = { export const ToolWaiting: Story = { args: { - thread: { - ...MARKDOWN_ISSUE, - messages: [ - { ftm_role: "user", ftm_content: "call a tool and wait" }, - { - ftm_role: "assistant", - ftm_content: "", - ftm_tool_calls: [ - { - id: 
"toolu_01JbWarAwzjMyV6azDkd5skX", - function: { - arguments: '{"use_ast": true}', - name: "tree", - }, - type: "function", - index: 0, + messages: [ + { + ftm_role: "user", + ftm_content: "call a tool and wait", + ftm_belongs_to_ft_id: "", + ftm_alt: 0, + ftm_num: 1, + ftm_prev_alt: 0, + ftm_call_id: "", + ftm_created_ts: 0, + }, + { + ftm_role: "assistant", + ftm_content: "", + ftm_tool_calls: [ + { + id: "toolu_01JbWarAwzjMyV6azDkd5skX", + function: { + arguments: '{"use_ast": true}', + name: "tree", }, - ], - }, - ], - }, + type: "function", + index: 0, + }, + ], + ftm_belongs_to_ft_id: "", + ftm_alt: 0, + ftm_num: 2, + ftm_prev_alt: 0, + ftm_call_id: "", + ftm_created_ts: 0, + }, + ], }, parameters: { msw: { - handlers: [ - goodPing, - - goodUser, - // noChatLinks, - noTools, - - noCompletions, - noCommandPreview, - ], + handlers: [goodPing, goodUser, noTools, noCompletions, noCommandPreview], }, }, }; @@ -983,16 +1008,7 @@ export const TextDocUpdate: Story = { }, parameters: { msw: { - handlers: [ - goodPing, - - goodUser, - // noChatLinks, - noTools, - - noCompletions, - noCommandPreview, - ], + handlers: [goodPing, goodUser, noTools, noCompletions, noCommandPreview], }, }, }; diff --git a/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts b/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts index 01f759bdf..7fbf794c1 100644 --- a/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts +++ b/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts @@ -7,6 +7,13 @@ import { commandsApi, } from "../../services/refact/commands"; import { ChatContextFile } from "../../services/refact/types"; +import { + selectIsStreaming, + selectIsWaiting, + selectMessagesFromEndNode, +} from "../../features/ThreadMessages"; +import { useAppSelector } from "../../hooks/useAppSelector"; +import { formatMessagesForLsp } from "../../services/refact/links"; function useGetCommandCompletionQuery( query: string, @@ -64,9 +71,22 @@ function useCommandCompletion() { // TODO: this needs migrated function useGetCommandPreviewQuery( - _query: string, + query: string, ): (ChatContextFile | string)[] { - return []; + const messages = useAppSelector(selectMessagesFromEndNode); + const messagesToSend = formatMessagesForLsp(messages); + const isWaiting = useAppSelector(selectIsWaiting); + const isStreaming = useAppSelector(selectIsStreaming); + + // TODO: attach images + const { data } = commandsApi.useGetCommandPreviewQuery( + { messages: [...messagesToSend, { role: "user", content: query }] }, + { + skip: isWaiting || isStreaming, + }, + ); + if (!data) return []; + return data.files; } function useGetPreviewFiles(query: string, checkboxes: Checkboxes) { diff --git a/refact-agent/gui/src/components/MessageNode/MessageNode.stories.tsx b/refact-agent/gui/src/components/MessageNode/MessageNode.stories.tsx index d1ab37b9f..dafa9a285 100644 --- a/refact-agent/gui/src/components/MessageNode/MessageNode.stories.tsx +++ b/refact-agent/gui/src/components/MessageNode/MessageNode.stories.tsx @@ -7,47 +7,15 @@ import { } from "../../__fixtures__"; import { makeMessageTrie, - FTMMessage, EmptyNode, } from "../../features/ThreadMessages/makeMessageTrie"; import { Provider } from "react-redux"; import { Theme } from "../Theme"; import { setUpStore } from "../../app/store"; -import type { ChatMessage } from "../../services/refact/types"; import { FTMMessageNode as FTMessageNode } from 
"../../features/ThreadMessages/makeMessageTrie"; import { MessageNode } from "./MessageNode"; import { STUB_ALICE_MESSAGES } from "../../__fixtures__/message_lists"; -function chatMessagesToCMessages(chatMessages: ChatMessage[]): FTMMessage[] { - const messagesWithSystemMessage: ChatMessage[] = - chatMessages[0].ftm_role === "system" - ? chatMessages - : [ - { ftm_role: "system", ftm_content: "system message" }, - ...chatMessages, - ]; - - return messagesWithSystemMessage.map( - (message: ChatMessage, index) => { - const cmessage: FTMMessage = { - ftm_alt: 0, - ftm_num: index, - ftm_prev_alt: message.ftm_role === "system" ? -1 : 0, - ftm_belongs_to_ft_id: "test", - ftm_role: message.ftm_role, - ftm_content: message.ftm_content, - ftm_tool_calls: - "tool_calls" in message ? message.tool_calls : undefined, - ftm_call_id: "", - ftm_usage: "usage" in message ? message.usage : null, - ftm_created_ts: Date.now(), - }; - - return cmessage; - }, - ); -} - const messageTree = makeMessageTrie(STUB_ALICE_MESSAGES); const Template: React.FC<{ node: FTMessageNode | EmptyNode }> = ({ node }) => { @@ -78,7 +46,7 @@ export const Primary: StoryObj = { export const Textdoc: StoryObj = { args: { - node: makeMessageTrie(chatMessagesToCMessages(CHAT_WITH_TEXTDOC.messages)), + node: makeMessageTrie(CHAT_WITH_TEXTDOC), }, }; @@ -90,8 +58,6 @@ export const Knowledge: StoryObj = { export const MultiModal: StoryObj = { args: { - node: makeMessageTrie( - chatMessagesToCMessages(CHAT_WITH_MULTI_MODAL.messages), - ), + node: makeMessageTrie(CHAT_WITH_MULTI_MODAL), }, }; diff --git a/refact-agent/gui/src/components/MessageNode/MessageNode.tsx b/refact-agent/gui/src/components/MessageNode/MessageNode.tsx index a1a2b5b7d..fe592006a 100644 --- a/refact-agent/gui/src/components/MessageNode/MessageNode.tsx +++ b/refact-agent/gui/src/components/MessageNode/MessageNode.tsx @@ -41,9 +41,8 @@ const ElementForNodeMessage: React.FC<{ if (isAssistantMessage(message)) { // find the tool result for the tool cal - // TODO: why is this an error + // TODO: why is this an error?, could be FTMessageNode ? return ( - // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment {message.ftm_content} @@ -55,11 +54,7 @@ const ElementForNodeMessage: React.FC<{ } if (isChatContextFileMessage(message)) { - const files = parseOrElse( - // TODO: narrow the types for messages. - message.ftm_content as string, - [], - ); + const files = parseOrElse(message.ftm_content, []); return ; } diff --git a/refact-agent/gui/src/events/index.ts b/refact-agent/gui/src/events/index.ts index f3cc46854..2bd4da9be 100644 --- a/refact-agent/gui/src/events/index.ts +++ b/refact-agent/gui/src/events/index.ts @@ -1,14 +1,5 @@ // Careful with exports that include components, it'll cause this to compile to a large file. 
import type { FileInfo } from "../features/Chat/activeFile"; -// TODO: this cause more exports than needed :/ -export { - type ChatThread, - type Chat, - type ToolUse, -} from "../features/Chat/Thread/types"; -// TODO: this may need to be re-created -// export { newChatAction } from "../features/Chat/Thread/actions"; -import { type Chat } from "../features/Chat/Thread/types"; import type { Snippet } from "../features/Chat/selectedSnippet"; import type { Config } from "../features/Config/configSlice"; import type { ErrorSliceState } from "../features/Errors/errorsSlice"; @@ -33,6 +24,7 @@ export { type CurrentProjectInfo, } from "../features/Chat/currentProject"; export type { TextDocToolCall } from "../components/Tools/types"; +export type { BaseMessage as ThreadMessage } from "../services/refact"; // here export type { @@ -43,11 +35,6 @@ export type { ToolEditResult, } from "../services/refact"; -import { MessagesSubscriptionSubscription } from "../../generated/documents"; - -export type ThreadMessage = - MessagesSubscriptionSubscription["comprehensive_thread_subs"]["news_payload_thread_message"]; - export type { FThreadMultipleMessagesInput, FThreadMessageInput, @@ -62,7 +49,6 @@ export type InitialState = { config: Config; active_file: FileInfo; selected_snippet: Snippet; - chat: Chat; error: ErrorSliceState; pages: PageSliceState; current_project: CurrentProjectInfo; @@ -75,7 +61,7 @@ export { ideNewFileAction, ideOpenHotKeys, ideOpenSettingsAction, - ideOpenChatInNewTab, + // ideOpenChatInNewTab, ideAnimateFileStart, ideAnimateFileStop, ideChatPageChange, diff --git a/refact-agent/gui/src/features/Chat/Thread/index.ts b/refact-agent/gui/src/features/Chat/Thread/index.ts deleted file mode 100644 index eea524d65..000000000 --- a/refact-agent/gui/src/features/Chat/Thread/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./types"; diff --git a/refact-agent/gui/src/features/Chat/Thread/types.ts b/refact-agent/gui/src/features/Chat/Thread/types.ts deleted file mode 100644 index c66313c00..000000000 --- a/refact-agent/gui/src/features/Chat/Thread/types.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { FThreadMessageSubs } from "../../../../generated/documents"; -import { Usage } from "../../../services/refact"; -import { ChatMessages } from "../../../services/refact/types"; -import { parseOrElse } from "../../../utils/parseOrElse"; - -export type IntegrationMeta = { - name?: string; - path?: string; - project?: string; - shouldIntermediatePageShowUp?: boolean; -}; - -export function isIntegrationMeta(json: unknown): json is IntegrationMeta { - if (!json || typeof json !== "object") return false; - if (!("name" in json) || !("path" in json) || !("project" in json)) { - return false; - } - return true; -} - -export interface MessageWithIntegrationMeta - extends Omit< - FThreadMessageSubs["news_payload_thread_message"], - "ftm_user_preferences" - > { - ftm_user_preferences: { integration: IntegrationMeta }; -} - -export function isMessageWithIntegrationMeta( - message: unknown, -): message is MessageWithIntegrationMeta { - if (!message || typeof message !== "object") return false; - if (!("ftm_user_preferences" in message)) return false; - if ( - !message.ftm_user_preferences || - typeof message.ftm_user_preferences !== "object" - ) - return false; - const preferences = message.ftm_user_preferences as Record; - if (!("integration" in preferences)) return false; - return isIntegrationMeta(preferences.integration); -} -export type ChatThread = { - id: string; - messages: ChatMessages; - model: 
string; - title?: string; - createdAt?: string; - updatedAt?: string; - tool_use?: ToolUse; - read?: boolean; - isTitleGenerated?: boolean; - boost_reasoning?: boolean; - integration?: IntegrationMeta | null; - mode?: LspChatMode; - project_name?: string; - last_user_message_id?: string; - new_chat_suggested: SuggestedChat; - automatic_patch?: boolean; - currentMaximumContextTokens?: number; - currentMessageContextTokens?: number; - increase_max_tokens?: boolean; -}; - -export type SuggestedChat = { - wasSuggested: boolean; - wasRejectedByUser?: boolean; -}; - -export type ToolUse = "quick" | "explore" | "agent"; - -export type Chat = { - streaming: boolean; - thread: ChatThread; - error: null | string; - prevent_send: boolean; - checkpoints_enabled?: boolean; - waiting_for_response: boolean; - max_new_tokens?: number; - cache: Record; - tool_use: ToolUse; - send_immediately: boolean; - follow_ups_enabled?: boolean; - title_generation_enabled?: boolean; -}; - -export type PayloadWithId = { id: string }; -export type PayloadWithChatAndNumber = { chatId: string; value: number }; -export type PayloadWithChatAndMessageId = { chatId: string; messageId: string }; -export type PayloadWithChatAndBoolean = { chatId: string; value: boolean }; -export type PayloadWithChatAndUsage = { chatId: string; usage: Usage }; -export type PayloadWithChatAndCurrentUsage = { - chatId: string; - n_ctx: number; - prompt_tokens: number; -}; -export type PayloadWithIdAndTitle = { - title: string; - isTitleGenerated: boolean; -} & PayloadWithId; - -export type DetailMessage = { detail: string }; - -function isDetailMessage(json: unknown): json is DetailMessage { - if (!json) return false; - if (typeof json !== "object") return false; - return "detail" in json && typeof json.detail === "string"; -} - -export function checkForDetailMessage(str: string): DetailMessage | false { - const json = parseOrElse(str, {}); - if (isDetailMessage(json)) return json; - return false; -} - -export function isToolUse(str: string): str is ToolUse { - if (!str) return false; - if (typeof str !== "string") return false; - return str === "quick" || str === "explore" || str === "agent"; -} - -export type LspChatMode = - | "NO_TOOLS" - | "EXPLORE" - | "AGENT" - | "CONFIGURE" - | "PROJECT_SUMMARY"; - -export function isLspChatMode(mode: string): mode is LspChatMode { - return ( - mode === "NO_TOOLS" || - mode === "EXPLORE" || - mode === "AGENT" || - mode === "CONFIGURE" || - mode === "PROJECT_SUMMARY" - ); -} - -export function chatModeToLspMode({ - toolUse, - mode, - defaultMode, -}: { - toolUse?: ToolUse; - mode?: LspChatMode; - defaultMode?: LspChatMode; -}): LspChatMode { - if (defaultMode) { - return defaultMode; - } - if (mode) { - return mode; - } - if (toolUse === "agent") return "AGENT"; - if (toolUse === "quick") return "NO_TOOLS"; - return "EXPLORE"; -} diff --git a/refact-agent/gui/src/features/Chat/Thread/utils.ts b/refact-agent/gui/src/features/Chat/Thread/utils.ts deleted file mode 100644 index 9d8b67f22..000000000 --- a/refact-agent/gui/src/features/Chat/Thread/utils.ts +++ /dev/null @@ -1,173 +0,0 @@ -import { - // AssistantMessage, - // ChatContextFile, - ChatContextFileMessage, - // ChatMessage, - ChatMessages, - // ChatResponse, - // DiffChunk, - // SubchatResponse, - ToolMessage, - UserMessage, - // isAssistantDelta, - isAssistantMessage, - // isCDInstructionResponse, - // isChatContextFileDelta, - // isChatResponseChoice, - // isContextFileResponse, - isDiffChunk, - isDiffMessage, - // isDiffResponse, - isLspUserMessage, 
- // isPlainTextResponse, - // isSubchatContextFileResponse, - // isSubchatResponse, - // isSystemResponse, - // isToolCallDelta, - // isThinkingBlocksDelta, - isToolContent, - isToolMessage, - // isToolResponse, - isUserMessage, - // isUserResponse, - // ThinkingBlock, - // isToolCallMessage, - // Usage, - LSPUserMessage, -} from "../../../services/refact"; -import { type LspChatMessage } from "../../../services/refact"; - -export function formatMessagesForLsp(messages: ChatMessages): LspChatMessage[] { - return messages.reduce((acc, message) => { - if (isUserMessage(message)) { - const { ftm_role, ftm_content, ...rest } = message; - const msg: LSPUserMessage = { - ...rest, - role: ftm_role, - content: ftm_content, - }; - return acc.concat([msg]); - } - - if (isAssistantMessage(message)) { - return acc.concat([ - { - role: message.ftm_role, - content: message.ftm_content, - tool_calls: message.ftm_tool_calls ?? undefined, - thinking_blocks: message.thinking_blocks ?? undefined, - finish_reason: message.finish_reason, - usage: message.usage, - }, - ]); - } - - if (isToolMessage(message)) { - return acc.concat([ - { - role: "tool", - content: message.ftm_content, - tool_call_id: message.ftm_call_id, - }, - ]); - } - - if (isDiffMessage(message)) { - const diff = { - role: message.ftm_role, - content: JSON.stringify(message.ftm_content), - tool_call_id: message.tool_call_id, - }; - return acc.concat([diff]); - } - - const ftm_content = - typeof message.ftm_content === "string" - ? message.ftm_content - : JSON.stringify(message.ftm_content); - return [...acc, { role: message.ftm_role, content: ftm_content }]; - }, []); -} - -export function formatMessagesForChat( - messages: LspChatMessage[], -): ChatMessages { - return messages.reduce((acc, message) => { - if (isLspUserMessage(message) && typeof message.content === "string") { - const userMessage: UserMessage = { - ftm_role: message.role, - ftm_content: message.content, - checkpoints: message.checkpoints, - }; - return acc.concat(userMessage); - } - - if (message.role === "assistant") { - const { role, content, ...rest } = message; - return acc.concat({ - ftm_role: role, - ftm_content: content, - ...rest, - }); - } - - if ( - message.role === "context_file" && - typeof message.content === "string" - ) { - const contextFileMessage: ChatContextFileMessage = { - ftm_role: message.role, - ftm_content: message.content, - }; - return acc.concat(contextFileMessage); - } - - if (message.role === "system" && typeof message.content === "string") { - return acc.concat({ - ftm_role: message.role, - ftm_content: message.content, - }); - } - - if (message.role === "plain_text" && typeof message.content === "string") { - return acc.concat({ - ftm_role: message.role, - ftm_content: message.content, - }); - } - - if ( - message.role === "cd_instruction" && - typeof message.content === "string" - ) { - return acc.concat({ - ftm_role: message.role, - ftm_content: message.content, - }); - } - - if ( - message.role === "tool" && - (typeof message.content === "string" || isToolContent(message.content)) && - typeof message.tool_call_id === "string" - ) { - // TODO: why type cast this - return acc.concat(message as unknown as ToolMessage); - } - - if ( - message.role === "diff" && - Array.isArray(message.content) && - message.content.every(isDiffChunk) && - typeof message.tool_call_id === "string" - ) { - return acc.concat({ - ftm_role: message.role, - ftm_content: message.content, - tool_call_id: message.tool_call_id, - }); - } - - return acc; - }, []); -} diff 
--git a/refact-agent/gui/src/features/Chat/index.ts b/refact-agent/gui/src/features/Chat/index.ts
index 0148140d0..5168fc3e5 100644
--- a/refact-agent/gui/src/features/Chat/index.ts
+++ b/refact-agent/gui/src/features/Chat/index.ts
@@ -1,4 +1,3 @@
 export { Chat, type ChatProps } from "./Chat";
 export * from "./activeFile";
-export * from "./Thread";
 export * from "./selectedSnippet";
diff --git a/refact-agent/gui/src/features/ExpertsAndModels/expertsSlice.ts b/refact-agent/gui/src/features/ExpertsAndModels/expertsSlice.ts
index 47e6395ed..fcf69c9f2 100644
--- a/refact-agent/gui/src/features/ExpertsAndModels/expertsSlice.ts
+++ b/refact-agent/gui/src/features/ExpertsAndModels/expertsSlice.ts
@@ -11,7 +11,7 @@ type InitialState = {
     | ExpertsForGroupQuery["experts_effective_list"][number]["fexp_id"]
     | null;
   selectedModel:
-    | ModelsForExpertQuery["expert_choice_consequences"][number]["provm_name"]
+    | ModelsForExpertQuery["expert_choice_consequences"]["models"][number]["provm_name"]
     | null;
 };
 
@@ -64,7 +64,7 @@ export const expertsSlice = createSlice({
     builder.addMatcher(
       graphqlQueriesAndMutations.endpoints.modelsForExpert.matchFulfilled,
       (state, action) => {
-        const names = action.payload.expert_choice_consequences.map(
+        const names = action.payload.expert_choice_consequences.models.map(
          (model) => model.provm_name,
        );
        if (!state.selectedModel && names.length > 0) {
diff --git a/refact-agent/gui/src/features/ExpertsAndModels/useModelsForExpert.ts b/refact-agent/gui/src/features/ExpertsAndModels/useModelsForExpert.ts
index e4478012e..0dd4dbd60 100644
--- a/refact-agent/gui/src/features/ExpertsAndModels/useModelsForExpert.ts
+++ b/refact-agent/gui/src/features/ExpertsAndModels/useModelsForExpert.ts
@@ -31,7 +31,7 @@ export const useModelsForExpert = () => {
   const options = useMemo(() => {
     if (!modelsForExpertRequest.data) return [];
-    return modelsForExpertRequest.data.expert_choice_consequences.map(
+    return modelsForExpertRequest.data.expert_choice_consequences.models.map(
       (model) => model.provm_name,
     );
   }, [modelsForExpertRequest.data]);
 
diff --git a/refact-agent/gui/src/features/Login/LoginPage.tsx b/refact-agent/gui/src/features/Login/LoginPage.tsx
index 07c5776b8..46643f782 100644
--- a/refact-agent/gui/src/features/Login/LoginPage.tsx
+++ b/refact-agent/gui/src/features/Login/LoginPage.tsx
@@ -15,7 +15,7 @@ import { Accordion } from "../../components/Accordion";
 import { useLogin, useEmailLogin, useEventsBusForIDE } from "../../hooks";
 import { UnderConstruction } from "./UnderConstruction";
 
-const IS_LOGIN_DISABLED = false;
+const IS_LOGIN_DISABLED = true;
 
 export const LoginPage: React.FC = () => {
   const { loginWithProvider, polling, cancelLogin } = useLogin();
diff --git a/refact-agent/gui/src/features/ThreadMessages/makeMessageTrie.ts b/refact-agent/gui/src/features/ThreadMessages/makeMessageTrie.ts
index 6623135e6..7bb223303 100644
--- a/refact-agent/gui/src/features/ThreadMessages/makeMessageTrie.ts
+++ b/refact-agent/gui/src/features/ThreadMessages/makeMessageTrie.ts
@@ -1,9 +1,13 @@
 import { partition } from "../../utils";
-import { MessagesSubscriptionSubscription } from "../../../generated/documents";
+// import { MessagesSubscriptionSubscription } from "../../../generated/documents";
+import type {
+  // ChatMessage,
+  BaseMessage,
+} from "../../services/refact/types";
 
-export type FTMMessage = NonNullable<
-  MessagesSubscriptionSubscription["comprehensive_thread_subs"]["news_payload_thread_message"]
->;
+// export type FTMMessage = NonNullable<
+//
MessagesSubscriptionSubscription["comprehensive_thread_subs"]["news_payload_thread_message"] +// >; interface Node { value: T; @@ -12,7 +16,7 @@ interface Node { export type EmptyNode = Node; -export type FTMMessageNode = Node; +export type FTMMessageNode = Node; export function isEmptyNode( node: EmptyNode | FTMMessageNode, @@ -24,7 +28,7 @@ export function isEmptyNode( // return message.ftm_prev_alt === -1; // }; -export function sortMessageList(messages: FTMMessage[]): FTMMessage[] { +export function sortMessageList(messages: BaseMessage[]): BaseMessage[] { return messages.slice(0).sort((a, b) => { if (a.ftm_num === b.ftm_num) { return a.ftm_alt - b.ftm_alt; @@ -34,7 +38,7 @@ export function sortMessageList(messages: FTMMessage[]): FTMMessage[] { } export const makeMessageTrie = ( - messages: FTMMessage[], + messages: BaseMessage[], ): FTMMessageNode | EmptyNode => { if (messages.length === 0) return { value: null, children: [] }; const sortedMessages = sortMessageList(messages); @@ -52,8 +56,8 @@ export const makeMessageTrie = ( }; function getChildren( - parent: FTMMessage, - messages: FTMMessage[], + parent: BaseMessage, + messages: BaseMessage[], ): FTMMessageNode[] { if (messages.length === 0) return []; const rowNumber = parent.ftm_num + 1; @@ -70,8 +74,8 @@ export function getAncestorsForNode( num: number, alt: number, prevAlt: number, - messages: FTMMessage[], -): FTMMessage[] { + messages: BaseMessage[], +): BaseMessage[] { // TODO: dummy node might cause this to be off by one. const child = messages.find( @@ -86,9 +90,9 @@ export function getAncestorsForNode( } function getParentsIter( - child: FTMMessage, - messages: FTMMessage[], - memo: FTMMessage[] = [], + child: BaseMessage, + messages: BaseMessage[], + memo: BaseMessage[] = [], ) { const maybeParent = findParent(child.ftm_num, child.ftm_prev_alt, messages); const collected = [child, ...memo]; @@ -100,8 +104,8 @@ function getParentsIter( function findParent( num: number, prevAlt: number, - messages: FTMMessage[], -): FTMMessage | undefined { + messages: BaseMessage[], +): BaseMessage | undefined { return messages.find((message) => { return message.ftm_num === num - 1 && message.ftm_alt === prevAlt; }); diff --git a/refact-agent/gui/src/features/ThreadMessages/threadMessagesSlice.ts b/refact-agent/gui/src/features/ThreadMessages/threadMessagesSlice.ts index 7816806a8..ec3bd8a74 100644 --- a/refact-agent/gui/src/features/ThreadMessages/threadMessagesSlice.ts +++ b/refact-agent/gui/src/features/ThreadMessages/threadMessagesSlice.ts @@ -4,11 +4,8 @@ import { type PayloadAction, } from "@reduxjs/toolkit"; import { MessagesSubscriptionSubscription } from "../../../generated/documents"; -import { - FTMMessage, - makeMessageTrie, - getAncestorsForNode, -} from "./makeMessageTrie"; +import { makeMessageTrie, getAncestorsForNode } from "./makeMessageTrie"; +import type { BaseMessage } from "../../services/refact/types"; import { pagesSlice } from "../Pages/pagesSlice"; import { graphqlQueriesAndMutations } from "../../services/graphql"; @@ -18,11 +15,8 @@ import { ToolMessage, isToolMessage, } from "../../services/refact"; -import { - isMessageWithIntegrationMeta, - MessageWithIntegrationMeta, -} from "../Chat"; -import { takeWhile } from "../../utils"; + +import { Override, takeWhile } from "../../utils"; // TODO: move this somewhere export type ToolConfirmationRequest = { @@ -60,10 +54,47 @@ type Message = NonNullable< MessagesSubscriptionSubscription["comprehensive_thread_subs"]["news_payload_thread_message"] >; -type 
InitialState = { +export type IntegrationMeta = { + name?: string; + path?: string; + project?: string; + shouldIntermediatePageShowUp?: boolean; +}; + +export function isIntegrationMeta(json: unknown): json is IntegrationMeta { + if (!json || typeof json !== "object") return false; + if (!("name" in json) || !("path" in json) || !("project" in json)) { + return false; + } + return true; +} + +export type MessageWithIntegrationMeta = Override< + Message, + { + ftm_user_preferences: { integration: IntegrationMeta }; + } +>; + +export function isMessageWithIntegrationMeta( + message: unknown, +): message is MessageWithIntegrationMeta { + if (!message || typeof message !== "object") return false; + if (!("ftm_user_preferences" in message)) return false; + if ( + !message.ftm_user_preferences || + typeof message.ftm_user_preferences !== "object" + ) + return false; + const preferences = message.ftm_user_preferences as Record; + if (!("integration" in preferences)) return false; + return isIntegrationMeta(preferences.integration); +} + +export type MessagesInitialState = { waitingBranches: number[]; // alt numbers streamingBranches: number[]; // alt number - messages: Record; + messages: Record; ft_id: string | null; endNumber: number; endAlt: number; @@ -71,7 +102,7 @@ type InitialState = { thread: Thread | null; }; -const initialState: InitialState = { +const initialState: MessagesInitialState = { waitingBranches: [], streamingBranches: [], messages: {}, @@ -99,7 +130,7 @@ function getInfoFromId(id: string) { // https://github.com/reduxjs/redux-toolkit/discussions/4553 see this for creating memoized selectors const selectMessagesValues = createSelector( - (state: InitialState) => state.messages, + (state: MessagesInitialState) => state.messages, (messages) => Object.values(messages), ); @@ -179,7 +210,7 @@ export const threadMessagesSlice = createSlice({ const infoFromId = getInfoFromId(action.payload.news_payload_id); if (!infoFromId) return state; if (!(action.payload.news_payload_id in state.messages)) { - const msg: FTMMessage = { + const msg: BaseMessage = { ...infoFromId, ftm_role: action.payload.stream_delta.ftm_role, // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment @@ -267,7 +298,10 @@ export const threadMessagesSlice = createSlice({ }, // TODO: check where this is used - setThreadFtId: (state, action: PayloadAction) => { + setThreadFtId: ( + state, + action: PayloadAction, + ) => { state.ft_id = action.payload; }, }, @@ -458,11 +492,11 @@ export const threadMessagesSlice = createSlice({ const maybeIntegrationMeta = messages.find(isMessageWithIntegrationMeta); if (!maybeIntegrationMeta) return null; // TODO: any types are causing issues here - const message = maybeIntegrationMeta as MessageWithIntegrationMeta; + const message = maybeIntegrationMeta; return message.ftm_user_preferences.integration; }), - selectMessageIsLastOfType: (state, message: FTMMessage) => { + selectMessageIsLastOfType: (state, message: BaseMessage) => { const { endNumber, endAlt, endPrevAlt, messages } = state; const currentBranch = getAncestorsForNode( endNumber, diff --git a/refact-agent/gui/src/hooks/useEventBusForIDE.ts b/refact-agent/gui/src/hooks/useEventBusForIDE.ts index aed4f6695..18925f642 100644 --- a/refact-agent/gui/src/hooks/useEventBusForIDE.ts +++ b/refact-agent/gui/src/hooks/useEventBusForIDE.ts @@ -1,8 +1,6 @@ import { useCallback } from "react"; import { createAction } from "@reduxjs/toolkit"; import { usePostMessage } from "./usePostMessage"; -// TODO: remove this 
-import type { ChatThread } from "../features/Chat/Thread/types"; import { EVENT_NAMES_FROM_SETUP, HostSettings, @@ -32,10 +30,6 @@ export type OpenFilePayload = { }; export const ideOpenFile = createAction("ide/openFile"); -export const ideOpenChatInNewTab = createAction( - "ide/openChatInNewTab", -); - export const ideAnimateFileStart = createAction( "ide/animateFile/start", ); @@ -91,7 +85,6 @@ export const ideClearActiveTeamsWorkspace = createAction( export const useEventsBusForIDE = () => { const postMessage = usePostMessage(); - // const canPaste = useAppSelector((state) => state.active_file.can_paste); const startFileAnimation = useCallback( (fileName: string) => { @@ -169,14 +162,6 @@ export const useEventsBusForIDE = () => { [getFullPath, postMessage], ); - const openChatInNewTab = useCallback( - (thread: ChatThread) => { - const action = ideOpenChatInNewTab(thread); - postMessage(action); - }, - [postMessage], - ); - const chatPageChange = useCallback( (page: string) => { const action = ideChatPageChange(page); @@ -299,7 +284,6 @@ export const useEventsBusForIDE = () => { newFile, openHotKeys, openFile, - openChatInNewTab, setupHost, queryPathThenOpenFile, openCustomizationFile, diff --git a/refact-agent/gui/src/hooks/useSendMessages.ts b/refact-agent/gui/src/hooks/useSendMessages.ts index 45aa5cbcf..e2f34772b 100644 --- a/refact-agent/gui/src/hooks/useSendMessages.ts +++ b/refact-agent/gui/src/hooks/useSendMessages.ts @@ -11,11 +11,11 @@ import { selectCurrentModel, } from "../features/ExpertsAndModels/expertsSlice"; import { Tool } from "../services/refact/tools"; -import { selectAllImages } from "../features/AttachedImages/imagesSlice"; -import { - UserMessage, - UserMessageContentWithImage, -} from "../services/refact/types"; +// import { selectAllImages } from "../features/AttachedImages/imagesSlice"; +// import { +// UserMessage, +// UserMessageContentWithImage, +// } from "../services/refact/types"; import { useIdForThread } from "./useIdForThread"; import { graphqlQueriesAndMutations } from "../services/graphql/queriesAndMutationsApi"; @@ -33,7 +33,7 @@ export function useSendMessages() { const selectedExpert = useAppSelector(selectCurrentExpert); const selectedModel = useAppSelector(selectCurrentModel); - const attachedImages = useAppSelector(selectAllImages); + // const attachedImages = useAppSelector(selectAllImages); const [sendMessages, _sendMessagesResult] = graphqlQueriesAndMutations.useSendMessagesMutation(); @@ -44,37 +44,38 @@ export function useSendMessages() { const [getTools, _getToolsResult] = useGetToolsLazyQuery(); - const maybeAddImagesToQuestion = useCallback( - (question: string): UserMessage => { - if (attachedImages.length === 0) - return { - ftm_role: "user" as const, - ftm_content: question, - checkpoints: [], - }; - - const images = attachedImages.reduce( - (acc, image) => { - if (typeof image.content !== "string") return acc; - return acc.concat({ - type: "image_url", - image_url: { url: image.content }, - }); - }, - [], - ); - - if (images.length === 0) - return { ftm_role: "user", ftm_content: question, checkpoints: [] }; - - return { - ftm_role: "user", - ftm_content: [...images, { type: "text", text: question }], - checkpoints: [], - }; - }, - [attachedImages], - ); + // TODO: enable this + // const maybeAddImagesToQuestion = useCallback( + // (question: string): UserMessage => { + // if (attachedImages.length === 0) + // return { + // ftm_role: "user" as const, + // ftm_content: question, + // checkpoints: [], + // }; + + // const images = 
attachedImages.reduce( + // (acc, image) => { + // if (typeof image.content !== "string") return acc; + // return acc.concat({ + // type: "image_url", + // image_url: { url: image.content }, + // }); + // }, + // [], + // ); + + // if (images.length === 0) + // return { ftm_role: "user", ftm_content: question, checkpoints: [] }; + + // return { + // ftm_role: "user", + // ftm_content: [...images, { type: "text", text: question }], + // checkpoints: [], + // }; + // }, + // [attachedImages], + // ); const sendMultipleMessages = useCallback( async (messages: { ftm_role: string; ftm_content: unknown }[]) => { @@ -196,5 +197,9 @@ export function useSendMessages() { ], ); - return { sendMessage, sendMultipleMessages, maybeAddImagesToQuestion }; + return { + sendMessage, + sendMultipleMessages, + // maybeAddImagesToQuestion + }; } diff --git a/refact-agent/gui/src/hooks/useSmartLinks.ts b/refact-agent/gui/src/hooks/useSmartLinks.ts index a84a8f23f..94719b12f 100644 --- a/refact-agent/gui/src/hooks/useSmartLinks.ts +++ b/refact-agent/gui/src/hooks/useSmartLinks.ts @@ -1,7 +1,6 @@ import { useCallback } from "react"; import { LspChatMessage } from "../services/refact/chat"; -import { formatMessagesForChat } from "../features/Chat/Thread/utils"; import { useAppDispatch } from "./useAppDispatch"; import { clearInformation } from "../features/Errors/informationSlice"; @@ -40,8 +39,11 @@ export function useSmartLinks() { .filter((tool) => tool.enabled) .map((tool) => tool.spec); - // TODO: change this to flexus format - const messages = formatMessagesForChat(sl_chat); + // TODO: change this to flexus format, when / if smart links are enabled + // const messages = formatMessagesForChat(sl_chat); + const messages = sl_chat.map((message) => { + return { ftm_role: message.role, ftm_content: message.content }; + }); dispatch(clearInformation()); // TODO: when in an integration, we should enable all patch like tool requests void createThreadWitMultipleMessages({ diff --git a/refact-agent/gui/src/services/graphql/flexus.graphql b/refact-agent/gui/src/services/graphql/flexus.graphql index 9d5fbff8d..e29b3a9ac 100644 --- a/refact-agent/gui/src/services/graphql/flexus.graphql +++ b/refact-agent/gui/src/services/graphql/flexus.graphql @@ -70,9 +70,7 @@ subscription MessagesSubscription($ft_id: String!, $want_deltas: Boolean!) { } mutation MessageCreateMultiple($input: FThreadMultipleMessagesInput!) { - thread_messages_create_multiple(input: $input) { - count - } + thread_messages_create_multiple(input: $input) } mutation ThreadPatch($id: String!, $message: String!) { @@ -93,7 +91,9 @@ query ModelsForExpert($fexp_id: String!, $inside_fgroup_id: String!) 
{ fexp_id: $fexp_id inside_fgroup_id: $inside_fgroup_id ) { - provm_name + models { + provm_name + } } } diff --git a/refact-agent/gui/src/services/graphql/queriesAndMutationsApi.ts b/refact-agent/gui/src/services/graphql/queriesAndMutationsApi.ts index 8d465358f..01af928c6 100644 --- a/refact-agent/gui/src/services/graphql/queriesAndMutationsApi.ts +++ b/refact-agent/gui/src/services/graphql/queriesAndMutationsApi.ts @@ -40,7 +40,7 @@ import { import { type RootState } from "../../app/store"; import { setThreadFtId } from "../../features/ThreadMessages"; import { Tool } from "../refact/tools"; -import { IntegrationMeta } from "../../features/Chat"; +import type { IntegrationMeta } from "../../features/ThreadMessages"; async function fetchAppSearchableId(apiKey: string, port: number) { const appIdUrl = `http://127.0.0.1:${port}/v1/get-app-searchable-id`; @@ -195,7 +195,7 @@ export const graphqlQueriesAndMutations = createApi({ const state = api.getState() as RootState; const apiKey = state.config.apiKey ?? ""; const port = state.config.lspPort; - // TODO: where is current workspace set? + const workspace = state.teams.group?.id ?? state.config.currentWorkspaceName ?? ""; @@ -301,7 +301,7 @@ export const graphqlQueriesAndMutations = createApi({ const state = api.getState() as RootState; const apiKey = state.config.apiKey ?? ""; const port = state.config.lspPort; - // TODO: where is current workspace set? + const workspace = state.teams.group?.id ?? state.config.currentWorkspaceName ?? ""; diff --git a/refact-agent/gui/src/services/refact/chat.ts b/refact-agent/gui/src/services/refact/chat.ts index edb13b756..51bab6604 100644 --- a/refact-agent/gui/src/services/refact/chat.ts +++ b/refact-agent/gui/src/services/refact/chat.ts @@ -1,6 +1,3 @@ -import { IntegrationMeta, LspChatMode } from "../../features/Chat"; -import { CHAT_URL } from "./consts"; -// import { ToolCommand } from "./tools"; import { ChatRole, ThinkingBlock, @@ -19,6 +16,12 @@ export type LSPUserMessage = Pick< content: UserMessage["ftm_content"]; }; +export type LSPToolMessage = { + role: "tool"; + content: ToolMessage["ftm_content"]; + tool_call_id: string; +}; + export type LspChatMessage = | { role: ChatRole; @@ -33,11 +36,8 @@ export type LspChatMessage = usage?: Usage | null; } | LSPUserMessage - | { - role: "tool"; - content: ToolMessage["ftm_content"]; - tool_call_id: string; - }; + | LSPToolMessage + | { role: string; content: string }; // could be more narrow. 
export function isLspChatMessage(json: unknown): json is LspChatMessage { @@ -56,44 +56,6 @@ export function isLspUserMessage( return message.role === "user"; } -type StreamArgs = - | { - stream: true; - abortSignal: AbortSignal; - } - | { stream: false; abortSignal?: undefined | AbortSignal }; - -type SendChatArgs = { - messages: LspChatMessage[]; - last_user_message_id?: string; // used for `refact-message-id` header - model: string; - lspUrl?: string; - takeNote?: boolean; - onlyDeterministicMessages?: boolean; - chatId?: string; - port?: number; - apiKey?: string | null; - // isConfig?: boolean; - toolsConfirmed?: boolean; - checkpointsEnabled?: boolean; - integration?: IntegrationMeta | null; - mode?: LspChatMode; // used for chat actions - boost_reasoning?: boolean; - increase_max_tokens?: boolean; -} & StreamArgs; - -type GetChatTitleArgs = { - messages: LspChatMessage[]; - model: string; - lspUrl?: string; - takeNote?: boolean; - onlyDeterministicMessages?: boolean; - chatId?: string; - port?: number; - apiKey?: string | null; - boost_reasoning?: boolean; -} & StreamArgs; - export type GetChatTitleResponse = { choices: Choice[]; created: number; @@ -141,6 +103,7 @@ export type PromptTokenDetails = { cached_tokens: number; }; +// TODO: check this export type Usage = { // completion_tokens: number; // prompt_tokens: number; @@ -205,114 +168,3 @@ export function isUsage(usage: unknown): usage is Usage { return true; } - -// TODO: add config url -export async function sendChat({ - messages, - model, - abortSignal, - stream, - // lspUrl, - // takeNote = false, - onlyDeterministicMessages: only_deterministic_messages, - chatId: chat_id, - port = 8001, - apiKey, - checkpointsEnabled = true, - // isConfig = false, - integration, - last_user_message_id = "", - mode, - boost_reasoning, - increase_max_tokens = false, -}: SendChatArgs): Promise { - // const toolsResponse = await getAvailableTools(); - - // const tools = takeNote - // ? toolsResponse.filter( - // (tool) => tool.function.name === "remember_how_to_use_tools", - // ) - // : toolsResponse.filter( - // (tool) => tool.function.name !== "remember_how_to_use_tools", - // ); - - const body = JSON.stringify({ - messages, - model: model, - stream, - only_deterministic_messages, - checkpoints_enabled: checkpointsEnabled, - // chat_id, - parameters: boost_reasoning ? { boost_reasoning: true } : undefined, - increase_max_tokens: increase_max_tokens, - meta: { - chat_id, - request_attempt_id: last_user_message_id, - // chat_remote, - // TODO: pass this through - chat_mode: mode ?? "EXPLORE", - // chat_mode: "EXPLORE", // NOTOOLS, EXPLORE, AGENT, CONFIGURE, PROJECTSUMMARY, - // TODO: not clear, that if we set integration.path it's going to be set also in meta as current_config_file - ...(integration?.path ? { current_config_file: integration.path } : {}), - }, - }); - - // const apiKey = getApiKey(); - const headers = { - "Content-Type": "application/json", - ...(apiKey ? 
{ Authorization: "Bearer " + apiKey } : {}), - }; - - const url = `http://127.0.0.1:${port}${CHAT_URL}`; - - return fetch(url, { - method: "POST", - headers, - body, - redirect: "follow", - cache: "no-cache", - // TODO: causes an error during tests :/ - // referrer: "no-referrer", - signal: abortSignal, - credentials: "same-origin", - }); -} - -export async function generateChatTitle({ - messages, - stream, - model, - onlyDeterministicMessages: only_deterministic_messages, - chatId: chat_id, - port = 8001, - apiKey, -}: GetChatTitleArgs): Promise { - const body = JSON.stringify({ - messages, - model, - stream, - max_tokens: 300, - only_deterministic_messages: only_deterministic_messages, - chat_id, - // NOTE: we don't want to use reasoning here, for example Anthropic requires at least max_tokens=1024 for thinking - // parameters: boost_reasoning ? { boost_reasoning: true } : undefined, - }); - - const headers = { - "Content-Type": "application/json", - ...(apiKey ? { Authorization: "Bearer " + apiKey } : {}), - }; - - const url = `http://127.0.0.1:${port}${CHAT_URL}`; - - return fetch(url, { - method: "POST", - headers, - body, - redirect: "follow", - cache: "no-cache", - // TODO: causes an error during tests :/ - // referrer: "no-referrer", - credentials: "same-origin", - }); -} diff --git a/refact-agent/gui/src/services/refact/commands.ts b/refact-agent/gui/src/services/refact/commands.ts index ddb9aa915..11b89650f 100644 --- a/refact-agent/gui/src/services/refact/commands.ts +++ b/refact-agent/gui/src/services/refact/commands.ts @@ -2,7 +2,7 @@ import { RootState } from "../../app/store"; import { parseOrElse } from "../../utils"; import { LspChatMessage } from "./chat"; import { AT_COMMAND_COMPLETION, AT_COMMAND_PREVIEW } from "./consts"; -import type { ChatContextFile, ChatMeta } from "./types"; +import type { ChatContextFile } from "./types"; import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; @@ -86,7 +86,7 @@ export const commandsApi = createApi({ CommandPreviewRequest >({ queryFn: async (args, api, _opts, baseQuery) => { - const { messages, meta, model } = args; + const { messages } = args; const state = api.getState() as RootState; const port = state.config.lspPort; const url = `http://127.0.0.1:${port}${AT_COMMAND_PREVIEW}`; @@ -95,7 +95,7 @@ export const commandsApi = createApi({ method: "POST", credentials: "same-origin", redirect: "follow", - body: { messages, meta, model }, + body: { messages, model_n_ctx: 2094 }, }); if (response.error) return { error: response.error }; @@ -199,14 +199,12 @@ function isCommandPreviewContent(json: unknown): json is CommandPreviewContent { export type CommandPreviewRequest = { messages: LspChatMessage[]; - meta: ChatMeta; - model: string; }; export type CommandPreviewResponse = { messages: CommandPreviewContent[]; current_context: number; - number_context: number; + // number_context: number; }; export function isCommandPreviewResponse( @@ -216,8 +214,8 @@ export function isCommandPreviewResponse( if (typeof json !== "object") return false; if (!("current_context" in json) || typeof json.current_context !== "number") return false; - if (!("number_context" in json) || typeof json.number_context !== "number") - return false; + // if (!("number_context" in json) || typeof json.number_context !== "number") + // return false; if (!("messages" in json)) return false; if (!Array.isArray(json.messages)) return false; diff --git a/refact-agent/gui/src/services/refact/consts.ts b/refact-agent/gui/src/services/refact/consts.ts 
index 5df7c49cd..a5e109638 100644 --- a/refact-agent/gui/src/services/refact/consts.ts +++ b/refact-agent/gui/src/services/refact/consts.ts @@ -1,4 +1,3 @@ -export const CHAT_URL = `/v1/chat`; export const STATISTIC_URL = `/v1/get-dashboard-plots`; export const AT_COMMAND_COMPLETION = "/v1/at-command-completion"; export const AT_COMMAND_PREVIEW = "/v1/at-command-preview"; @@ -12,8 +11,7 @@ export const DOCUMENTATION_LIST = `/v1/docs-list`; export const DOCUMENTATION_ADD = `/v1/docs-add`; export const DOCUMENTATION_REMOVE = `/v1/docs-remove`; export const PING_URL = `/v1/ping`; -export const PATCH_URL = `/v1/patch-single-file-from-ticket`; -export const APPLY_ALL_URL = "/v1/patch-apply-all"; + export const CHAT_LINKS_URL = "/v1/links"; export const CHAT_COMMIT_LINK_URL = "/v1/git-commit"; // Integrations @@ -29,11 +27,6 @@ export const DOCKER_CONTAINER_ACTION = "/v1/docker-container-action"; export const PREVIEW_CHECKPOINTS = "/v1/checkpoints-preview"; export const RESTORE_CHECKPOINTS = "/v1/checkpoints-restore"; -export const TELEMETRY_CHAT_PATH = "/v1/telemetry-chat"; -export const TELEMETRY_NET_PATH = "/v1/telemetry-network"; - -export const COMPRESS_MESSAGES_URL = "/v1/trajectory-compress"; - export const SET_ACTIVE_GROUP_ID = "/v1/set-active-group-id"; // Providers & Models diff --git a/refact-agent/gui/src/services/refact/links.ts b/refact-agent/gui/src/services/refact/links.ts index cd9b5cb01..6c36ada9f 100644 --- a/refact-agent/gui/src/services/refact/links.ts +++ b/refact-agent/gui/src/services/refact/links.ts @@ -1,9 +1,19 @@ import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; import { RootState } from "../../app/store"; -import { ChatMessage, ChatMessages } from "./types"; -import { formatMessagesForLsp } from "../../features/Chat/Thread/utils"; +import { + BaseMessage, + ChatMessage, + ChatMessages, + isAssistantMessage, + isDiffMessage, + isToolMessage, + isUserMessage, + LspChatMode, +} from "./types"; import { CHAT_COMMIT_LINK_URL, CHAT_LINKS_URL } from "./consts"; -import { LspChatMode } from "../../features/Chat"; + +import { LspChatMessage, LSPToolMessage, LSPUserMessage } from "./chat"; + // useful for forcing specific links // import { STUB_LINKS_FOR_CHAT_RESPONSE } from "../../__fixtures__"; @@ -227,3 +237,55 @@ function isCommitResponse(json: unknown): json is CommitResponse { // TODO: type check the arrays if we use the data anywhere. return true; } + +export function formatMessagesForLsp( + messages: BaseMessage[], +): LspChatMessage[] { + return messages.reduce((acc, message) => { + if (isUserMessage(message)) { + const { ftm_role, ftm_content, ...rest } = message; + const msg: LSPUserMessage = { + ...rest, + role: ftm_role, + content: ftm_content, + }; + return [...acc, msg]; + } + + if (isAssistantMessage(message)) { + const msg = { + role: message.ftm_role, + content: message.ftm_content, + tool_calls: message.ftm_tool_calls ?? undefined, + thinking_blocks: message.thinking_blocks ?? undefined, + finish_reason: message.finish_reason, + usage: message.usage, + }; + return [...acc, msg]; + } + + if (isToolMessage(message)) { + const msg: LSPToolMessage = { + role: "tool", + content: message.ftm_content, + tool_call_id: message.ftm_call_id, + }; + return [...acc, msg]; + } + + if (isDiffMessage(message)) { + const diff = { + role: message.ftm_role, + content: JSON.stringify(message.ftm_content), + tool_call_id: message.tool_call_id, + }; + return [...acc, diff]; + } + + const ftm_content = + typeof message.ftm_content === "string" + ? 
message.ftm_content + : JSON.stringify(message.ftm_content); + return [...acc, { role: message.ftm_role, content: ftm_content }]; + }, []); +} diff --git a/refact-agent/gui/src/services/refact/types.ts b/refact-agent/gui/src/services/refact/types.ts index d3a2f6b59..5caec32bf 100644 --- a/refact-agent/gui/src/services/refact/types.ts +++ b/refact-agent/gui/src/services/refact/types.ts @@ -1,9 +1,13 @@ -// import { FThreadMessageOutput } from "../../../generated/documents"; -import { LspChatMode } from "../../features/Chat"; +import type { MessagesSubscriptionSubscription } from "../../../generated/documents"; import { Checkpoint } from "../../features/Checkpoints/types"; +import { Override } from "../../utils/Override"; import { GetChatTitleActionPayload, GetChatTitleResponse, Usage } from "./chat"; import { MCPArgs, MCPEnvs } from "./integrations"; +export type BaseMessage = NonNullable< + MessagesSubscriptionSubscription["comprehensive_thread_subs"]["news_payload_thread_message"] +>; + export type ChatRole = | "user" | "assistant" @@ -121,22 +125,10 @@ export function isSingleModelToolMessage(toolMessage: ToolMessage) { return typeof toolMessage.ftm_content === "string"; } -// FTheadMessageOutput -interface BaseMessage { - ftm_role: ChatRole; - ftm_content: - | string - | ChatContextFile[] - | MultiModalToolContent[] - | DiffChunk[] - | null - | (UserMessageContentWithImage | ProcessedUserMessageContentWithImages)[]; -} - -export interface ChatContextFileMessage extends BaseMessage { - ftm_role: "context_file"; - ftm_content: string; // ChatContextFile[]; -} +export type ChatContextFileMessage = Override< + BaseMessage, + { ftm_role: "context_file"; ftm_content: string } +>; export type UserImage = { type: "image_url"; @@ -149,43 +141,58 @@ export type UserMessageContentWithImage = text: string; } | UserImage; -export interface UserMessage extends BaseMessage { - ftm_role: "user"; - ftm_content: - | string - | (UserMessageContentWithImage | ProcessedUserMessageContentWithImages)[]; - checkpoints?: Checkpoint[]; - compression_strength?: CompressionStrength; -} + +export type UserMessage = Override< + BaseMessage, + { + ftm_role: "user"; + ftm_content: + | string + | (UserMessageContentWithImage | ProcessedUserMessageContentWithImages)[]; + checkpoints?: Checkpoint[]; + compression_strength?: CompressionStrength; + } +>; export type ProcessedUserMessageContentWithImages = { m_type: string; m_content: string; }; -export interface AssistantMessage extends BaseMessage, CostInfo { - ftm_role: "assistant"; - ftm_content: string | null; - reasoning_content?: string | null; // NOTE: only for internal UI usage, don't send it back - ftm_tool_calls?: ToolCall[] | null; - thinking_blocks?: ThinkingBlock[] | null; - finish_reason?: "stop" | "length" | "abort" | "tool_calls" | null; - usage?: Usage | null; -} +export type AssistantMessage = Override< + BaseMessage, + { + ftm_role: "assistant"; + ftm_content: string | null; + reasoning_content?: string | null; // NOTE: only for internal UI usage, don't send it back + ftm_tool_calls?: ToolCall[] | null; + thinking_blocks?: ThinkingBlock[] | null; // is this still here? + finish_reason?: "stop" | "length" | "abort" | "tool_calls" | null; + usage?: Usage | null; + } +>; // & CostInfo + +// TODO: is this still used? 
export interface ToolCallMessage extends AssistantMessage { tool_calls: ToolCall[]; } -export interface SystemMessage extends BaseMessage { - ftm_role: "system"; - ftm_content: string; -} - -export interface ToolMessage extends BaseMessage { - ftm_role: "tool"; - ftm_content: ToolContent; - ftm_call_id: string; -} +export type SystemMessage = Override< + BaseMessage, + { + ftm_role: "system"; + ftm_content: string; + } +>; + +export type ToolMessage = Override< + BaseMessage, + { + ftm_role: "tool"; + ftm_content: ToolContent; + ftm_call_id: string; + } +>; // TODO: There maybe sub-types for this export type DiffChunk = { @@ -228,11 +235,14 @@ export function isDiffChunk(json: unknown): json is DiffChunk { } return true; } -export interface DiffMessage extends BaseMessage { - ftm_role: "diff"; - ftm_content: DiffChunk[]; - tool_call_id: string; -} +export type DiffMessage = Override< + BaseMessage, + { + ftm_role: "diff"; + ftm_content: DiffChunk[]; + tool_call_id: string; + } +>; export function isUserMessage(message: unknown): message is UserMessage { if (!message) return false; @@ -242,15 +252,21 @@ export function isUserMessage(message: unknown): message is UserMessage { return message.ftm_role === "user"; } -export interface PlainTextMessage extends BaseMessage { - ftm_role: "plain_text"; - ftm_content: string; -} +export type PlainTextMessage = Override< + BaseMessage, + { + ftm_role: "plain_text"; + ftm_content: string; + } +>; -export interface CDInstructionMessage extends BaseMessage { - ftm_role: "cd_instruction"; - ftm_content: string; -} +export type CDInstructionMessage = Override< + BaseMessage, + { + ftm_role: "cd_instruction"; + ftm_content: string; + } +>; export type ChatMessage = | UserMessage @@ -279,6 +295,13 @@ export function isChatMessage(message: unknown): message is ChatMessage { export type ChatMessages = ChatMessage[]; +export type LspChatMode = + | "NO_TOOLS" + | "EXPLORE" + | "AGENT" + | "CONFIGURE" + | "PROJECT_SUMMARY"; + export type ChatMeta = { current_config_file?: string | undefined; chat_id?: string | undefined; @@ -350,6 +373,7 @@ export function isCDInstructionMessage( return message.ftm_role === "cd_instruction"; } +// Is this still used? 
 interface BaseDelta {
   ftm_role?: ChatRole | null;
   // TODO: what are these felids for
@@ -367,6 +391,7 @@ interface AssistantDelta extends BaseDelta {
   thinking_blocks?: ThinkingBlock[] | null;
 }
 
+// TODO: can remove
 export function isAssistantDelta(delta: unknown): delta is AssistantDelta {
   if (!delta) return false;
   if (typeof delta !== "object") return false;
diff --git a/refact-agent/gui/src/utils/Override.ts b/refact-agent/gui/src/utils/Override.ts
new file mode 100644
index 000000000..ab54b90a6
--- /dev/null
+++ b/refact-agent/gui/src/utils/Override.ts
@@ -0,0 +1,4 @@
+export type Override<
+  Type,
+  NewType extends { [key in keyof Type]?: NewType[key] },
+> = Omit & NewType;
diff --git a/refact-agent/gui/src/utils/getMetering.ts b/refact-agent/gui/src/utils/getMetering.ts
index 48cad7a61..6caac8826 100644
--- a/refact-agent/gui/src/utils/getMetering.ts
+++ b/refact-agent/gui/src/utils/getMetering.ts
@@ -1,8 +1,18 @@
 import { isUsage, Usage } from "../services/refact/chat";
-import { AssistantMessage, isAssistantMessage } from "../services/refact/types";
+import {
+  AssistantMessage,
+  BaseMessage,
+  isAssistantMessage,
+} from "../services/refact/types";
+import { Override } from "./Override";
+
+type AssistantMessageWithUsage = Override<
+  AssistantMessage,
+  { ftm_usage: Usage }
+>;
 
 // TODO: cap cost should be in the messages and fix types
-export function getTotalCostMeteringForMessages(messages: unknown[]) {
+export function getTotalCostMeteringForMessages(messages: BaseMessage[]) {
   const assistantMessages = messages.filter(hasUsageAndPrice);
 
   if (assistantMessages.length === 0) return null;
@@ -14,6 +24,7 @@
   }>(
     (acc, message) => {
       // const metering_coins_prompt = message.ftm_usage.
+      message.ftm_usage;
       return {
         metering_coins_prompt:
           acc.metering_coins_prompt +
@@ -82,9 +93,9 @@
     },
   );
 }
-function hasUsageAndPrice(message: unknown): message is AssistantMessage & {
-  ftm_usage: Usage;
-} {
+function hasUsageAndPrice(
+  message: unknown,
+): message is AssistantMessageWithUsage {
   if (!isAssistantMessage(message)) return false;
   if (!("ftm_usage" in message)) return false;
   if (!message.ftm_usage) return false;
diff --git a/refact-agent/gui/src/utils/index.ts b/refact-agent/gui/src/utils/index.ts
index 825c6c6ae..23a871579 100644
--- a/refact-agent/gui/src/utils/index.ts
+++ b/refact-agent/gui/src/utils/index.ts
@@ -11,3 +11,4 @@ export * from "./fencedBackticks";
 export * from "./isAbsolutePath";
 export * from "./isDetailMessage";
 export * from "./hasProperty";
+export * from "./Override";
diff --git a/refact-agent/gui/tsconfig.json b/refact-agent/gui/tsconfig.json
index e93c034a6..4ec42a03e 100644
--- a/refact-agent/gui/tsconfig.json
+++ b/refact-agent/gui/tsconfig.json
@@ -21,7 +21,7 @@
     "noFallthroughCasesInSwitch": true,
     "plugins": [{ "name": "typescript-plugin-css-modules" }]
   },
-  "include": ["src", "codegen.ts", "urqlClient.tsx"],
+  "include": ["src", "codegen.ts", "generated"],
   "references": [{ "path": "./tsconfig.node.json" }],
   "plugins": [
     {