diff --git a/website/i18n.ts b/website/i18n.ts index 679a7f6bbd26..f9685f4da8a1 100644 --- a/website/i18n.ts +++ b/website/i18n.ts @@ -1,24 +1,46 @@ import { Locale, NestedStrings, Translations, useI18n as _useI18n } from '@edgeandnode/gds' import ar from '@/pages/ar/translations' +import de from '@/pages/de/translations' import en from '@/pages/en/translations' import es from '@/pages/es/translations' +import fr from '@/pages/fr/translations' import hi from '@/pages/hi/translations' +import it from '@/pages/it/translations' import ja from '@/pages/ja/translations' import ko from '@/pages/ko/translations' +import mr from '@/pages/mr/translations' +import nl from '@/pages/nl/translations' +import pl from '@/pages/pl/translations' +import pt from '@/pages/pt/translations' +import ru from '@/pages/ru/translations' +import sv from '@/pages/sv/translations' +import tr from '@/pages/tr/translations' +import uk from '@/pages/uk/translations' import ur from '@/pages/ur/translations' import vi from '@/pages/vi/translations' import zh from '@/pages/zh/translations' const appLocales = [ Locale.ARABIC, + // Locale.GERMAN, Locale.ENGLISH, Locale.SPANISH, + // Locale.FRENCH, Locale.HINDI, + // Locale.ITALIAN, Locale.JAPANESE, - Locale.KOREAN, + // Locale.KOREAN, + Locale.MARATHI, + // Locale.DUTCH, + // Locale.POLISH, + Locale.PORTUGUESE, + Locale.RUSSIAN, + // Locale.SWEDISH, + // Locale.TURKISH, + // Locale.UKRAINIAN, Locale.URDU, - Locale.VIETNAMESE, + // Locale.VIETNAMESE, Locale.CHINESE, ] as const @@ -39,11 +61,22 @@ export type AppTranslations = Translations & { export const translations = { ar, + de, en, es, + fr, hi, + it, ja, ko, + mr, + nl, + pl, + pt, + ru, + sv, + tr, + uk, ur, vi, zh, diff --git a/website/pages/ar/arbitrum/_meta.js b/website/pages/ar/arbitrum/_meta.js index 88222dc6a9b7..321fe93849be 100644 --- a/website/pages/ar/arbitrum/_meta.js +++ b/website/pages/ar/arbitrum/_meta.js @@ -1,3 +1,5 @@ +import meta from '../../en/arbitrum/_meta.js' + export 
default { - 'arbitrum-faq': '', + ...meta, } diff --git a/website/pages/ar/arbitrum/arbitrum-faq.mdx b/website/pages/ar/arbitrum/arbitrum-faq.mdx index a9618a47bd2a..188e57bdae23 100644 --- a/website/pages/ar/arbitrum/arbitrum-faq.mdx +++ b/website/pages/ar/arbitrum/arbitrum-faq.mdx @@ -1,79 +1,77 @@ --- -title: Arbitrum FAQ +title: الأسئلة الشائعة حول Arbitrum --- Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. -## General FAQs +## لماذا يقوم The Graph بتطبيق حل L2؟ -### Why is The Graph implementing an L2 Solution? +من خلال توسيع نطاق TheGrraph في L2، يمكن للمشاركين في الشبكة توقع ما يلي: -By scaling The Graph on L2, network participants can expect: +- Upwards of 26x savings on gas fees -- 26x savings on gas fees +- سرعة أكبر في المعاملات -- Faster transaction speed +- Security inherited from Ethereum -- Secured by Ethereum +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. -The protocol allows network participants to interact more frequently at a reduced cost in gas fees. This enables Indexers to index a greater number of subgraphs, allows developers to deploy and upgrade subgraphs with greater ease, enables Delegators to delegate GRT with increased frequency, and gives Curators the ability to add signal to a larger number of subgraphs. +The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. 
-The Graph community [decided](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move forward with Arbitrum last year. +## ما الذي يجب علي فعله لاستخدام The Graph في L2؟ -### What do I need to do to use The Graph on L2? - -Users bridge their GRT and ETH  using one of the following methods: +يقوم المستخدمون بربط GRT و ETH باستخدام إحدى الطرق التالية: - [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +للاستفادة من استخدام The Graph على L2 ، استخدم قائمة المنسدلة للتبديل بين الشبكات. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -### As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## بصفتي مطور subgraph ، أو مستهلك بيانات ، أو مفهرس ، أو مسنق ، أو مفوض ، ماذا علي أن أفعل الآن؟ -There is no immediate action required. +There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. -Core developer teams are working to create migration helpers that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect migration helpers to be available soon. +Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. 
+اعتبارًا من 10 أبريل 2023 ، تم سك 5٪ من جميع مكافآت الفهرسة على Arbitrum. مع زيادة المشاركة في الشبكة ، وموافقة المجلس عليها ، ستتحول مكافآت الفهرسة تدريجياً من Ethereum إلى Arbitrum ، وستنتقل في النهاية بالكامل إلى Arbitrum. -### If I would like to participate in the network on L2, what should I do? +## إذا كنت أرغب في المشاركة في اشبكة L2 ، فماذا أفعل؟ Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/vtvv7FP). -### Are there any risks associated with scaling the network to L2? +## هل توجد أي مخاطر مرتبطة بتوسيع الشبكة إلى L2؟ All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -### Will existing subgraphs on Ethereum continue to work? +## هل ستستمر ال subgraphs الموجودة على Ethereum في العمل؟ -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +نعم ، ستعمل عقود شبكة The Graph بالتوازي على كل من Ethereum و Arbitrum حتى الانتقال بشكل كامل إلى Arbitrum في وقت لاحق. -### Will GRT have a new smart contract deployed on Arbitrum? +## هل سيكون لدى GRT عقد ذكي جديد يتم نشره على Arbitrum؟ Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. 
-## Billing on Arbitrum FAQs +## الأسئلة الشائعة حول إعداد الفواتير في Arbitrum -### What do I need to do about the GRT in my billing balance? +## ما الذي علي فعله بشأن ال GRT في حساب الفوترة الخاص بي ؟ -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +لا شئ! لقد تم نقل GRT الخاصة بك بشكل آمن إلى Arbitrum ويتم استخدامها للدفع مقابل الاستعلامات. -### How do I know my funds have migrated securely to Arbitrum? +## كيف أعرف أن أموالي قد انتقلت بشكل آمن إلى Arbitrum؟ All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -### How do I know the Arbitrum bridge is secure? +## كيف أعرف أن جسر Arbitrum آمن؟ The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. -### What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## ماذا علي أن أفعل إذا قمت بإضافة GRT جديد من محفظة Ethereum mainnet الخاصة بي؟ Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. diff --git a/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx new file mode 100644 index 000000000000..527639455d8c --- /dev/null +++ b/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx @@ -0,0 +1,315 @@ +--- +title: الأسئلة الشائعة حول أدوات النقل L2 +--- + +> لم يتم إصدار أدوات نقل L2 حتى الآن. من المتوقع أن تكون متاحة في صيف عام 2023. + +## ما هي أدوات النقل L2؟ + +جعل The Graph تكلفة المشاركة في الشبكة أرخص بمقدار 26 مرة للمساهمين من خلال نشر البروتوكول على منصة Arbitrum One. تم إنشاء أدوات التحويل إلى L2 بواسطة المطورين الأساسيين لتسهيل الانتقال إلى L2. 
لكل مشارك في البروتوكول، سيتم مشاركة مجموعة من أدوات التحويل لتجربة سلسة عند الانتقال إلى L2، مما يجنّب فترات الذوبان أو الاضطرار إلى سحب GRT يدويًا. ستتطلب هذه الأدوات منك اتباع مجموعة محددة من الخطوات اعتمادًا على الدور الذي تلعبه داخل The Graph وما تقوم بنقله إلى L2. + +## هل يمكنني استخدام نفس المحفظة التي استخدمها في Ethereum mainnet؟ + +إذا كنت تستخدم محفظة [EOA] \(https://ethereum.org/en/developers/docs/accounts/#types-of-account) ، فيمكنك استخدام نفس العنوان. إذا كانت محفظة Ethereum mainnet الخاصة بك عبارة عن عقد (مثل multisig) ، فيجب عليك تحديد [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) حيث سيتم إرسال التحويل الخاص بك. يرجى التحقق من العنوان بعناية لأن أي تحويلات إلى عنوان غير صحيح يمكن أن تؤدي إلى خسارة غير قابلة للرجوع. إذا كنت ترغب في استخدام multisig على L2 ، فتأكد من نشر عقد multisig على Arbitrum One. + +## نقل الـ Subgraph (الرسم البياني الفرعي) + +## كيف أقوم بتحويل الـ subgraph الخاص بي؟ + +لنقل الـ subgraph الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ التحويل على شبكة Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. قم بتأكيد نقل الـ subgraph على Arbitrum \\ \* + +4. قم بإنهاء نشر الـ subgraph على Arbitrum + +5. حدّث عنوان URL للاستعلام (مستحسن) + +\\ \* لاحظ أنه يجب عليك تأكيد النقل في غضون 7 أيام وإلا فقد يتم فقد الـ subgraph الخاص بك. في معظم الحالات ، سيتم تشغيل هذه الخطوة تلقائيًا ، ولكن قد تكون هناك حاجة إلى تأكيد يدوي إذا كان هناك ارتفاع في أسعار الغاز على Arbitrum. إذا كان هناك أي مشكلة أثناء هذه العملية ، فستكون هناك موارد للمساعدة: اتصل بالدعم على support@thegraph.com أو على [Discord] \(https://discord.gg/vtvv7FP). + +## من أين يجب أن أبدأ التحويل ؟ + +يمكنك بدء عملية النقل من [Subgraph Studio] \(https://thegraph.com/studio/) ، [Explorer ،] \(https://thegraph.com/explorer) أو من أي صفحة تفاصيل subgraph. انقر فوق الزر "Transfer Subgraph" في صفحة تفاصيل الـ subgraph لبدء النقل.
+ +## كم من الوقت سأنتظر حتى يتم نقل الـ subgraph الخاص بي + +يستغرق وقت النقل حوالي 20 دقيقة. يعمل جسر Arbitrum في الخلفية لإكمال نقل الجسر تلقائيًا. في بعض الحالات ، قد ترتفع تكاليف الغاز وستحتاج إلى تأكيد المعاملة مرة أخرى. + +## هل سيظل الـ subgraph قابلاً للاكتشاف بعد أن أنقله إلى L2؟ + +سيكون الـ subgraph الخاص بك قابلاً للاكتشاف على الشبكة التي تم نشرها عليها فقط. على سبيل المثال ، إذا كان الـ subgraph الخاص بك موجودًا على Arbitrum One ، فيمكنك العثور عليه فقط في Explorer على Arbitrum One ولن تتمكن من العثور عليه على Ethereum. يرجى التأكد من تحديد Arbitrum One في مبدل الشبكة في أعلى الصفحة للتأكد من أنك على الشبكة الصحيحة. بعد النقل ، سيظهر الـ L1 subgraph على أنه مهمل. + +## هل يلزم نشر الـ subgraph الخاص بي لنقله؟ + +للاستفادة من أداة نقل الـ subgraph ، يجب أن يكون الرسم البياني الفرعي الخاص بك قد تم نشره بالفعل على شبكة Ethereum الرئيسية ويجب أن يكون لديه إشارة تنسيق مملوكة للمحفظة التي تمتلك الرسم البياني الفرعي. إذا لم يتم نشر الرسم البياني الفرعي الخاص بك ، فمن المستحسن أن تقوم ببساطة بالنشر مباشرة على Arbitrum One - ستكون رسوم الغاز أقل بكثير. إذا كنت تريد نقل رسم بياني فرعي منشور ولكن حساب المالك لا يملك إشارة تنسيق عليه ، فيمكنك الإشارة بمبلغ صغير (على سبيل المثال 1 GRT) من ذلك الحساب ؛ تأكد من اختيار إشارة "auto-migrating". + +## ماذا يحدث لإصدار Ethereum mainnet للرسم البياني الفرعي الخاص بي بعد أن النقل إلى Arbitrum؟ + +بعد نقل الرسم البياني الفرعي الخاص بك إلى Arbitrum ، سيتم إهمال إصدار Ethereum mainnet. نوصي بتحديث عنوان URL للاستعلام في غضون 48 ساعة. ومع ذلك ، هناك فترة سماح تحافظ على عمل عنوان URL للشبكة الرئيسية الخاصة بك بحيث يمكن تحديث أي دعم dapp لجهة خارجية. + +## بعد النقل ، هل أحتاج أيضًا إلى إعادة النشر على Arbitrum؟ + +بعد فترة النقل البالغة 20 دقيقة ، ستحتاج إلى تأكيد النقل لإكمال النقل ، أداة النقل ستوجهك للقيام بذلك. سيستمر دعم L1 endpoint الخاص بك خلال فترة النقل وفترة السماح. من المستحسن أن تقوم بتحديثه عندما يكون ذلك مناسبًا لك. 
+ +## هل سيكون هناك وقت تعطل للـ endpoint الخاصة بي أثناء إعادة النشر؟ + +هنا يجب ألا يكون هناك وقت تعطل عند استخدام أداة النقل لنقل الرسم البياني الفرعي الخاص بك إلى L2 ، ستكون L1 endpoint مدعومة أثناء فترة النقل وفترة السماح. من المستحسن أن تقوم بتحديث endpoint الخاصة بك عندما يكون ذلك مناسبًا لك. + +## هل يتم نشر وتخطيط الإصدار بنفس الطريقة في الـ L2 كما هو الحال في شبكة Ethereum Ethereum mainnet؟ + +نعم. عند النشر في Subgraph Studio تأكد من تحديد Arbitrum One كشبكتك المنشورة. سيتوفر في الاستوديو أحدث endpoint والتي تشير إلى أحدث إصدار محدث من الرسم البياني الفرعي. + +## هل سينتقل تنسيق الـ subgraph مع الـ subgraph ؟ + +If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. + +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. + +## هل يمكنني إعادة الرسم البياني الفرعي الخاص بي إلى Ethereum mainnet بعد أن أقوم بالنقل؟ + +بمجرد النقل ، سيتم إهمال إصدار شبكة Ethereum mainnet للرسم البياني الفرعي الخاص بك. إذا كنت ترغب في العودة إلى mainnet ، فستحتاج إلى إعادة النشر (redeploy) والنشر مرة أخرى على mainnet. ومع ذلك ، لا يُنصح بشدة بالتحويل مرة أخرى إلى شبكة Ethereum mainnet حيث سيتم في النهاية توزيع مكافآت الفهرسة بالكامل على Arbitrum One. + +## لماذا أحتاج إلى Bridged ETH لإكمال النقل؟ + +يتم دفع رسوم الغاز في Arbitrum One باستخدام ETHbridged ETH (ETH الذي تم ربطه بـ Arbitrum One). ومع ذلك ، فإن رسوم الغاز أقل بكثير عند مقارنتها بشبكة Ethereum mainnet. + +## Curation Signal(إشارة التنسيق) + +## How do I transfer my curation? + +To transfer your curation, you will need to complete the following steps: + +1. ابدأ نقل الإشارة على شبكة Ethereum mainnet + +2. Specify an L2 Curator address\* + +3. 
انتظر 20 دقيقة للتأكيد + +\\ \* إذا لزم الأمر - تستخدم عنوان عقد. + +## How will I know if the subgraph I curated has moved to L2? + +When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. + +## What if I do not wish to move my curation to L2? + +عندما يتم إهمال الرسم البياني الفرعي ، يكون لديك خيار سحب الإشارة. وبالمثل ، إذا انتقل الرسم البياني الفرعي إلى L2 ، فيمكنك اختيار سحب الإشارة في شبكة Ethereum الرئيسية أو إرسال الإشارة إلى L2. + +## How do I know my curation successfully transferred? + +يمكن الوصول إلى تفاصيل الإشارة عبر Explorer بعد حوالي 20 دقيقة من بدء أداة النقل للـ L2. + +## Can I transfer my curation on more than one subgraph at a time? + +لا يوجد خيار كهذا حالياً. + +## Indexer Stake(حصة المفهرس) + +## كيف يمكنني تحويل حصتي إلى Arbitrum؟ + +لتحويل حصتك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ تحويل الحصص على شبكة Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. Confirm stake transfer on Arbitrum + +\\ \* لاحظ أنه يجب عليك تأكيد التحويل في غضون 7 أيام وإلا قد تفقد حصتك. في معظم الحالات ، سيتم تشغيل هذه الخطوة تلقائيًا ، ولكن قد تكون هناك حاجة إلى تأكيد يدوي إذا كان هناك ارتفاع في أسعار الغاز على Arbitrum. إذا كانت هناك أي مشكلة أثناء هذه العملية ، فستكون هناك موارد للمساعدة: اتصل بالدعم على support@thegraph.com أو على [Discord] \(https://discord.gg/vtvv7FP). + +## هل سيتم تحويل حصتي بالكامل؟ + +يمكنك اختيار مقدار حصتك المراد تحويلها. إذا اخترت تحويل حصتك بالكامل مرة واحدة ، فستحتاج إلى إغلاق أي تخصيصات مفتوحة أولاً. + +إذا كنت تخطط لنقل أجزاء من حصتك في معاملات متعددة ، فيجب عليك دائمًا تحديد نفس عنوان المستفيد. + +ملاحظة: يجب أن تفي بالحد الأدنى من متطلبات الحصة على L2 في المرة الأولى التي تستخدم فيها أداة التحويل. يجب أن يرسل المفهرسون 100 ألف GRT كحد أدنى (عند استدعاء هذه الوظيفة في المرة الأولى). 
في حالة ترك جزء من الحصة على L1 ، يجب أن يكون أيضًا أكثر من 100 ألف GRT كحد أدنى وأن يكون كافيًا (جنبًا إلى جنب مع التفويضات) لتغطية مخصصاتك المفتوحة. + +## كم من الوقت لدي لتأكيد تحويل حصتي إلى Arbitrum؟ + +\\ _ \\ _ \\ \* يجب تأكيد معاملتك لإتمام تحويل الحصة على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقدان الحصة. + +## What if I have open allocations? + +إذا كنت لا ترسل كل حصصك، فإن أداة نقل L2 ستتحقق من أن الحد الأدنى 100 ألف GRT لا يزال في شبكة Ethereum mainnet وأن حصتك المتبقية وتفويضك كافيان لتغطية أي تخصيصات مفتوحة. قد تحتاج إلى إغلاق التخصيصات المفتوحة إذا كان رصيد GRT الخاص بك لا يغطي الحد الأدنى + المخصصات المفتوحة. + +## باستخدام أدوات النقل ، هل من الضروري الانتظار 28 يومًا لإلغاء الحصة في Ethereum mainnet قبل التحويل؟ + +لا ، يمكنك تحويل حصتك إلى L2 على الفور ، ولا داعي لإلغاء حصتك والانتظار قبل استخدام أداة التحويل. لا يسري الانتظار لمدة 28 يومًا إلا إذا كنت ترغب في سحب الحصة إلى محفظتك ، على شبكة Ethereum mainnet أو L2. + +## كم من الوقت سيستغرق تحويل حصتي؟ + +ستستغرق أداة النقل L2 حوالي 20 دقيقة لإكمال تحويل حصتك. + +## هل يجب أن أقوم بالفهرسة على Arbitrum قبل أن أنقل حصتي؟ + +يمكنك تحويل حصتك بشكل فعال أولاً قبل إعداد الفهرسة ، ولكن لن تتمكن من المطالبة بأي مكافآت على L2 حتى تقوم بتخصيصها لـ subgraphs على L2 وفهرستها وعرض POIs. + +## هل يستطيع المفوضون نقل تفويضهم قبل نقل indexing stake الخاص بي؟ + +لا ، لكي يقوم المفوضون بنقل GRT المفوضة إلى Arbitrum ، يجب أن يكون المفهرس الذي يتم التفويض إليه نشطًا في L2. + +## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? + +Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. 
The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +## Delegation(التفويض) + +## How do I transfer my delegation? + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## ماذا يحدث لمكافآتي إذا بدأت عملية تحويل وكان لا يزال التخصيص مفتوحا على Ethereum mainnet؟ + +If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. + +## What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +## Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. 
+ +## ماذا لو لم أتمكن من العثور على المفهرس الذي قمت بالتوفيض إليه في L2؟ + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +## Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +## How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. 
If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +## Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +## Vesting Contract Transfer + +## How do I transfer my vesting contract? + +To transfer your vesting, you will need to complete the following steps: + +1. Initiate the vesting transfer on Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. Confirm vesting transfer on Arbitrum + +## How do I transfer my vesting contract if I am only partially vested? + +1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) + +2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. + +3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. + +4. Withdraw any remaining ETH from the transfer tool contract + +## How do I transfer my vesting contract if I am fully vested? + +For those that are fully vested, the process is similar: + +1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) + +2. Set your L2 address with a call to the transfer tool contract + +3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. + +4. Withdraw any remaining ETH from the transfer tool contract + +## Can I transfer my vesting contract to Arbitrum? + +You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. 
The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). + +When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. + +تتم عمليات النقل باستخدام أداة النقل(Transfer Tool) التي ستكون مرئية في ملف تعريف Explorer الخاص بك عند الاتصال بحساب عقد الاستحقاق. + +يرجى ملاحظة أنك لن تكون قادرًا على استخلاص/ سحب GRT من عقد الاستحقاق على L2 حتى نهاية الجدول الزمني للاستحقاق عندما يتم تخويل عقدك بالكامل. إذا كنت بحاجة لتحرير GRT قبل ذلك الحين ، فيمكنك إعادة نقل GRT إلى عقد الاستحقاق على L1 باستخدام أداة تحويل أخرى متاحة لهذا الغرض. + +إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. + +## أنا أستخدم عقد الاستحقاق الخاص بي للقيام بالتخزين (staking) في mainnet. هل يمكنني تحويل حصتي إلى Arbitrum؟ + +نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل الحصة بحيث تكون مملوكة لعقد الاستحقاق L2 الخاص بك. يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعض رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك تحويل حصتك إلى أي عنوان على L2 ، ولكن يجب عليك تعيينها مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. + +## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? + +Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. 
If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. + +## هل يمكنني تحديد مستفيد مختلف لعقد الاستحقاق الخاص بي على L2؟ + +نعم ، في المرة الأولى التي تقوم فيها بتحويل رصيد وإعداد عقد استحقاق L2 ، يمكنك تحديد مستفيد من L2. تأكد من أن هذا المستفيد عبارة عن محفظة يمكنها إجراء المعاملات على Arbitrum One ، يجب أن تكون EOA أو multisig تم نشرها على Arbitrum One. + +If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. + +## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? + +Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + +This allows you to transfer your stake or delegation to any L2 address. + +## عقد الاستحقاق الخاص بي لا يزال مستحقًا. كيف أقوم بتحويل رصيد عقد الاستحقاق الخاص بي إلى L2؟ + +تنطبق هذه الخطوات فقط إذا كان عقدك لا يزال مستحقًا ، أو إذا كنت قد استخدمت هذه العملية من قبل عندما كان عقدك لا يزال مستحقًا. + +لتحويل عقد الاستحقاق الخاص بك إلى L2 ، سوف ترسل أي رصيد GRT إلى L2 باستخدام أدوات التحويل ، والتي ستعمل على تهيئة عقد استحقاق L2 الخاص بك: + +1. قم بإيداع بعض ETH في عقد أداة النقل (سيتم استخدام هذا لدفع ثمن غاز L2) + +2. إبطال وصول البروتوكول إلى عقد الاستحقاق (مطلوب للخطوة التالية) + +3. امنح البروتوكول حق الوصول إلى عقد الاستحقاق (سيسمح لعقدك بالتفاعل مع أداة التحويل) + +4. حدد عنوان المستفيد على L2 \\ \* وابدأ في تحويل الرصيد على Ethereum mainnet + +5. انتظر 20 دقيقة للتأكيد + +6. 
قم بتأكيد تحويل الرصيد على L2 + +\\ \* إذا لزم الأمر - أنت تستخدم عنوان عقد. + +\\ _ \\ _ \\ _ \\ _ يجب تأكيد معاملتك لإتمام تحويل الرصيد على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقد الرصيد. في معظم الحالات ، سيتم تشغيل هذه الخطوة تلقائيًا ، ولكن قد تكون هناك حاجة إلى تأكيد يدوي إذا كان هناك ارتفاع في أسعار الغاز على Arbitrum. إذا كانت هناك أية مشكلة أثناء هذه العملية ، فستكون هناك موارد للمساعدة: اتصل بالدعم على support@thegraph.com أو على [Discord] \(https://discord.gg/vtvv7FP). + +## هل يمكنني إرجاع عقد الاستحقاق إلى L1؟ + +ليست هناك حاجة للقيام بذلك لأن عقد الاستحقاق الخاص بك لا يزال في L1. عندما تستخدم أدوات التحويل ، فأنت تقوم فقط بإنشاء عقد جديد في L2 مرتبط بعقد الاستحقاق L1 الخاص بك ، ويمكنك إرسال GRT ذهابًا وإيابًا بينهما. + +## لماذا أحتاج إلى تغيير عقد الاستحقاق الخاص بي من البداية؟ + +You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. + +## ماذا يحدث إذا حاولت سحب عقدي عندما لم يتم تنفيذه بالكامل؟ هل هذا ممكن؟ + +هذا ليس احتمالاً. يمكنك إعادة الأموال إلى L1 وسحبها هناك. + +## ماذا لو لم أرغب في نقل عقد الاستحقاق الخاص بي إلى L2؟ + +You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available.
diff --git a/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx new file mode 100644 index 000000000000..6ac37ef04e01 --- /dev/null +++ b/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx @@ -0,0 +1,165 @@ +--- +title: دليل أدوات نقل(Transfer Tools) L2 +--- + +> لم يتم إصدار أدوات نقل الطبقة الثانية L2 حتى الآن. من المتوقع أن تكون متاحة في صيف عام 2023. + +جعل The Graph من السهل الانتقال إلى L2 على Arbitrum One. لكل مشارك في البروتوكول ، توجد مجموعة من أدوات نقل L2 لجعل النقل إلى L2 سلسًا لجميع المشاركين في الشبكة. ستطلب منك هذه الأدوات اتباع مجموعة محددة من الخطوات بناءً على ما تقوم بنقله. + +بعض الأسئلة المتكررة حول هذه الأدوات تمت الإجابة عليها في [الأسئلة الشائعة حول أدوات نقل الطبقة الثانية] \(/arbitrum/l2-transfer-tools-faq). تحتوي الأسئلة الشائعة على تفسيرات متعمقة لكيفية استخدام الأدوات وكيفية عملها والأمور التي يجب وضعها في الاعتبار عند إستخدامها. + +## كيف تنقل الغراف الفرعي الخاص بك إلى شبكة آربترم (الطبقة الثانية) + +## فوائد نقل الرسوم البيانية الفرعية الخاصة بك + +مجتمع الغراف والمطورون الأساسيون كانوا [يستعدون] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) للإنتقال إلى آربترم على مدى العام الماضي. ترث آربترم، سلسلة كتل من الطبقة الثانية أو "L2"، الأمان من سلسلة إيثيريوم ولكنها توفر رسوم غازٍ أقل بشكلٍ جذري. + +When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. 
Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. + +## فهم ما يحدث مع الإشارة وغرافك الفرعي على الطبقة الأولى وعناوين مواقع الإستعلام + +لنقل الرسم البياني الفرعي إلى Arbitrum يتم استخدام جسر Arbitrum GRT ، والذي يستخدم بدوره جسر Arbitrum الأصلي ، لإرسال الرسم البياني الفرعي إلى L2. سيؤدي "النقل" إلى إهمال الرسم البياني الفرعي على الشبكة الرئيسية وإرسال المعلومات لإعادة إنش اء الرسم البياني الفرعي على L2 باستخدام الجسر. وسيشمل أيضًا GRT hالذي تم استخدامه للإشارة ، والذي يجب أن يكون أكثر من صفر حتى يقبل الجسر النقل. + +عندما تختار نقل الرسم البياني الفرعي ، سيؤدي ذلك إلى تحويل جميع إشارات التنسيق الخاصة بالرسم الفرعي إلى GRT. هذا يعادل "إهمال" الرسم البياني الفرعي على الشبكة الرئيسية. سيتم إرسال GRT المستخدمة لعملية التنسيق الخاصة بك إلى L2 جمباً إلى جمب مع الرسم البياني الفرعي ، حيث سيتم استخدامها لإنتاج الإشارة نيابة عنك. + +يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون جزء من GRT الخاص بهم ، أو نقله أيضًا إلى L2 لصك إشارة على نفس الرسم البياني الفرعي. إذا لم يقم مالك الرسم البياني الفرعي بنقل الرسم البياني الفرعي الخاص به إلى L2 وقام بإيقافه يدويًا عبر استدعاء العقد ، فسيتم إخطار المنسقين وسيتمكنون من سحب تنسيقهم. + +بمجرد نقل الرسم البياني الفرعي ، لن يتلقى المفهرسون بعد الآن مكافآت لفهرسة الرسم البياني الفرعي، نظرًا لأنه يتم تحويل كل التنسيق لـ GRT. ومع ذلك ، سيكون هناك مفهرسون 1) سيستمرون في خدمة الرسوم البيانية الفرعية المنقولة لمدة 24 ساعة ، و 2) سيبدأون فورًا في فهرسة الرسم البياني الفرعي على L2. ونظرًا لأن هؤلاء المفهرسون لديهم بالفعل رسم بياني فرعي مفهرس ، فلا داعي لانتظار مزامنة الرسم البياني الفرعي ، وسيكون من الممكن الاستعلام عن الرسم البياني الفرعي على L2 مباشرة تقريبًا. + +يجب إجراء الاستعلامات على الرسم البياني الفرعي في L2 على عنوان URL مختلف (على \`` Arbitrum-gateway.thegraph.com`) ، لكن عنوان URL L1 سيستمر في العمل لمدة 48 ساعة على الأقل. 
بعد ذلك ، ستقوم بوابة L1 بإعادة توجيه الاستعلامات إلى بوابة L2 (لبعض الوقت) ، ولكن هذا سيضيف زمن تأخير لذلك يوصى تغيير جميع استعلاماتك إلى عنوان URL الجديد في أقرب وقت ممكن. + +## اختيار محفظة L2 الخاصة بك + +When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. + +عند نقل الرسم البياني الفرعي إلى Arbitrum ، يمكنك اختيار محفظة مختلفة والتي ستمتلك هذا الـ subgraph NFT على L2. + +إذا كنت تستخدم محفظة "عادية" مثل MetaMask (حساب مملوك خارجيًا EOA ، محفظة ليست بعقد ذكي) ، فهذا اختياري ويوصى بالاحتفاظ بعنوان المالك نفسه كما في L1. + +إذا كنت تستخدم محفظة بعقد ذكي ، مثل multisig (على سبيل المثال Safe) ، فإن اختيار عنوان مختلف لمحفظة L2 أمر إلزامي ، حيث من المرجح أن هذا الحساب موجود فقط على mainnet ولن تكون قادرًا على إجراء المعاملات على Arbitrum باستخدام هذه المحفظة. إذا كنت ترغب في الاستمرار في استخدام محفظة عقد ذكية أو multisig ، فقم بإنشاء محفظة جديدة على Arbitrum واستخدم عنوانها كمالك للرسم البياني الفرعي الخاص بك على L2. + +** من المهم جدًا استخدام عنوان محفظة تتحكم فيه ، ويمكنه إجراء معاملات على Arbitrum. وإلا فسيتم فقد الرسم البياني الفرعي ولا يمكن استعادته. ** + +## التحضير لعملية النقل: إنشاء جسر لـبعض ETH + +يتضمن نقل الرسم البياني الفرعي إرسال معاملة عبر الجسر ، ثم تنفيذ معاملة أخرى على Arbitrum. تستخدم المعاملة الأولى ETH على mainnet ، وتتضمن بعض ETH لدفع ثمن الغاز عند استلام الرسالة على L2. ومع ذلك ، إذا كان هذا الغاز غير كافٍ ، فسيتعين عليك إعادة إجراء المعاملة ودفع ثمن الغاز مباشرةً على L2 (هذه هي "الخطوة 3: تأكيد التحويل" أدناه). يجب تنفيذ هذه الخطوة ** في غضون 7 أيام من بدء التحويل **. علاوة على ذلك ، سيتم إجراء المعاملة الثانية مباشرة على Arbitrum ("الخطوة 4: إنهاء التحويل على L2"). لهذه الأسباب ، ستحتاج إلى بعض ETH في محفظة Arbitrum. إذا كنت تستخدم multisig أو عقداً ذكياً ، فيجب أن يكون ETH في المحفظة العادية (EOA) التي تستخدمها لتنفيذ المعاملات ، وليس على محفظة multisig نفسها. 
+ +يمكنك شراء ETH من بعض المنصات وسحبها مباشرة إلى Arbitrum ، أو يمكنك استخدام جسر Arbitrum لإرسال ETH من محفظة mainnet إلى L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). نظرًا لأن رسوم الغاز على Arbitrum أقل ، فستحتاج فقط إلى مبلغ صغير. من المستحسن أن تبدأ بمبلغ منخفض (على سبيل المثال 0.01 ETH) للموافقة على معاملتك. + +## العثور على أداة نقل الرسم البياني الفرعي + +يمكنك العثور على أداة نقل L2 في صفحة الرسم البياني الفرعي الخاص بك على Subgraph Studio: + +![transfer tool](/img/L2-transfer-tool1.png) + +ويتوفر أيضًا على Explorer إذا كنت متصلاً بالمحفظة التي تمتلك رسمًا بيانيًا فرعيًا ويتوفر أيضًا في صفحة ذلك الرسم البياني الفرعي في Explorer: + +![Transferring to L2](/img/transferToL2.png) + +سيؤدي النقر فوق الزر Transfer to L2 إلى فتح أداة النقل حيث يمكنك بدء عملية النقل. + +## الخطوة 1: بدء عملية النقل + +قبل بدء النقل ، يجب أن تقرر العنوان الذي سيمتلك الرسم البياني الفرعي على L2 (راجع "اختيار محفظة L2 الخاصة بك" أعلاه) ، ويوصى بشدة بالحصول على بعض ETH لرسوم الغاز الذي تم ربطه بالفعل على Arbitrum (راجع "التحضير للنقل: إنشاء جسر لبعض ETH" أعلاه). + +يرجى أيضًا ملاحظة أن نقل الرسم البياني الفرعي يتطلب وجود كمية غير صفرية من إشارة التنسيق عليه بنفس الحساب الذي يمتلك الرسم البياني الفرعي ؛ إذا لم تكن قد أشرت إلى الرسم البياني الفرعي ، فسيتعين عليك إضافة القليل من إشارة التنسيق (يكفي إضافة مبلغ صغير مثل 1 GRT). + +بعد فتح أداة النقل ، ستتمكن من إدخال عنوان المحفظة L2 في حقل "Receiving wallet address" - ** تأكد من إدخال العنوان الصحيح هنا **. سيطالبك النقر فوق Transfer Subgraph بتنفيذ المعاملة على محفظتك (لاحظ أنه يتم تضمين مبلغ من ETH لدفع ثمن غاز L2) ؛ سيؤدي هذا إلى بدء النقل وإيقاف الرسم البياني الفرعي L1 (راجع "فهم ما يحدث للإشارة والرسم الفرعي L1 وعناوين URL للاستعلام" أعلاه لمزيد من التفاصيل حول ما يحدث وراء الكواليس). + +إذا قمت بتنفيذ هذه الخطوة ، ** تأكد من المتابعة حتى إكمال الخطوة 3 في أقل من 7 أيام ، وإلا فسيتم فقد الرسم البياني الفرعي وإشارة GRT.
** هذا بسبب كيفية عمل رسائل L1-L2 على Arbitrum: الرسائل التي يتم إرسالها عبر الجسر وهي "retry-able tickets" يجب تنفيذها في غضون 7 أيام ، وقد يحتاج التنفيذ الأولي إلى إعادة المحاولة إذا كانت هناك ارتفاع في سعر الغاز على Arbitrum. + +![ابدأ النقل إلى L2](/img/startTransferL2.png) + +## الخطوة 2: انتظار وصول الرسم البياني الفرعي إلى L2 + +بعد بدء عملية النقل، يتعين على الرسالة التي ترسل الـ subgraph من L1 إلى L2 أن يتم نشرها عبر جسر Arbitrum. يستغرق ذلك حوالي 20 دقيقة (ينتظر الجسر لكتلة الشبكة الرئيسية التي تحتوي على المعاملة حتى يتأكد أنها "آمنة" من إمكانية إعادة ترتيب السلسلة). + +بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. + +![شاشة انتظار](/img/screenshotOfWaitScreenL2.png) + +## الخطوة الثالثة: تأكيد التحويل + +في معظم الحالات ، سيتم تنفيذ هذه الخطوة تلقائيًا لأن غاز L2 المضمن في الخطوة 1 يجب أن يكون كافيًا لتنفيذ المعاملة التي تتلقى الرسم البياني الفرعي في عقود Arbitrum. ومع ذلك ، في بعض الحالات ، من الممكن أن يؤدي ارتفاع أسعار الغاز على Arbitrum إلى فشل هذا التنفيذ التلقائي. في هذه الحالة ، ستكون "التذكرة" التي ترسل مخططك الفرعي إلى L2 معلقة وتتطلب إعادة المحاولة في غضون 7 أيام. + +في هذه الحالة ، فستحتاج إلى الاتصال باستخدام محفظة L2 تحتوي بعضاً من ETH على Arbitrum ، وتبديل شبكة محفظتك إلى Arbitrum ، والنقر فوق "Confirm Transfer" لإعادة محاولة المعاملة. + +![تأكيد النقل إلى L2](/img/confirmTransferToL2.png) + +## الخطوة 4: إنهاء عملية النقل على L2 + +في هذه المرحلة ، فإنه قد تم استلام الرسم البياني الفرعي و GRT على Arbitrum ، ولكن لم يتم نشر الرسم البياني الفرعي بعد. ستحتاج إلى الاتصال باستخدام محفظة L2 التي اخترتها كمحفظة استقبال ، وتبديل شبكة محفظتك إلى Arbitrum ، والنقر فوق "Publish Subgraph" + +![نشر الـ subgraph](/img/publishSubgraphL2TransferTools.png) + +![انتظر حتى يتم نشر الرسم البياني الفرعي](/img/waitForSubgraphToPublishL2TransferTools.png) + +سيؤدي هذا إلى نشر الرسم البياني الفرعي حتى يتمكن المفهرسون الذين يعملون في Arbitrum من البدء في تقديم الخدمة.
كما أنه سيعمل أيضًا على إصدار إشارة التنسيق باستخدام GRT التي تم نقلها من L1. + +## الخطوة 5: تحديث عنوان URL للاستعلام + +تم نقل الرسم البياني الفرعي الخاص بك بنجاح إلى Arbitrum! للاستعلام عن الرسم البياني الفرعي ، سيكون عنوان URL الجديد هو: + +`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` + +لاحظ أن subgraph ID على Arbitrum سيكون مختلفًا عن الذي لديك على mainnet ، ولكن يمكنك العثور عليه في Explorer أو Studio. كما هو مذكور أعلاه (راجع "فهم ما يحدث للإشارة والرسم الفرعي L1 وعناوين URL للاستعلام") سيتم دعم عنوان L1 URLالقديم لفترة قصيرة ، ولكن يجب عليك تبديل استعلاماتك إلى العنوان الجديد بمجرد مزامنة الرسم البياني الفرعي على L2. + +## كيفية نقل التنسيق الخاص بك إلى Arbitrum (L2) + +## فهم ما يحدث لعملية التنسيق خلال عمليات نقل الـ subgraph إلى L2 + +عندما ينقل مالك الرسم البياني الفرعي رسمًا فرعيًا إلى Arbitrum ، يتم تحويل كل إشارات الرسم البياني الفرعي لـ GRT في نفس الوقت. ينطبق هذا على الإشارة "التي تم ترحيلها تلقائيًا" ، وهي الإشارة التي لا ترتبط بنسخة محددة أو نشر محدد للرسم البياني الفرعي ولكنها تتبع أحدث إصدار من الرسم البياني الفرعي. + +This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. + +جزء من تلك الـ GRT يتم إرسالها لمالك الرسم البياني الفرعي إلى L2 جمباً إلى جمب مع الرسم البياني الفرعي. + +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. 
There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. + +## اختيار محفظة L2 الخاصة بك + +If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. + +If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. + +إذا كنت تستخدم محفظة بعقد ذكي ، مثل multisig (على سبيل المثال Safe) ، فإن اختيار عنوان مختلف لمحفظة L2 أمر إلزامي ، حيث من المرجح أن هذا الحساب موجود فقط على mainnet ولن تكون قادرًا على إجراء المعاملات على Arbitrum باستخدام هذه المحفظة. إذا كنت ترغب في الاستمرار في استخدام محفظة عقد ذكية أو multisig ، فقم بإنشاء محفظة جديدة على Arbitrum واستخدم عنوانها كعنوان محفظة استلام على L2. + +** من المهم جدًا استخدام عنوان محفظة تتحكم فيه ، ويمكنه إجراء معاملات على Arbitrum. وإلا فسيتم فقد التنسيق ولا يمكن استعادته. ** + +## إرسال التنسيق إلى L2: الخطوة 1 + +Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. + +If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. + +When looking at the subgraph page, you can choose to withdraw or transfer the curation.
Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. + +![نقل الإشارة](/img/transferSignalL2TransferTools.png) + +بعد فتح أداة النقل ، قد يُطلب منك إضافة بعضاً من ETH إلى محفظتك إذا لم يكن لديك أي منها. ستتمكن بعد ذلك من إدخال عنوان المحفظة على L2 في حقل "Receiving wallet address" - ** تأكد من إدخال العنوان الصحيح هنا **. سيطالبك النقر فوق Transfer Signal بتنفيذ المعاملة على محفظتك (لاحظ أنه يتم تضمين بعضاً من قيمة ETH لدفع رسوم غاز L2) ؛ سيتم بذلك بدء عملية النقل. + +إذا قمت بتنفيذ هذه الخطوة ، ** تأكد من المتابعة حتى إكمال الخطوة 3 في أقل من 7 أيام ، وإلا فسيتم فقد إشارة GRT. ** هذا بسبب كيفية عمل رسائل L1-L2 على Arbitrum: الرسائل التي يتم إرسالها عبر الجسر هي "retry-able tickets" يجب تنفيذها في غضون 7 أيام ، وقد يحتاج التنفيذ الأولي إلى إعادة المحاولة إذا كانت هناك ارتفاع في سعر الغاز على Arbitrum. + +## Sending curation to L2: step 2 + +البدء في عملية النقل: + +![أرسل إشارة إلى L2](/img/sendingCurationToL2Step2First.png) + +بعد بدء عملية النقل، يتعين على الرسالة التي ترسل تنسيقك من L1 إلى L2 أن يتم نشرها عبر جسر Arbitrum. يستغرق ذلك حوالي 20 دقيقة (ينتظر الجسر لكتلة الشبكة الرئيسية التي تحتوي على المعاملة حتى يتأكد أنها "آمنة" من إمكانية إعادة ترتيب السلسلة). + +بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. + +![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) + +## Sending curation to L2: step 3 + +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. + +في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة على L2 تحتوي بعضاً من ETH على Arbitrum ، وتبديل شبكة محفظتك إلى Arbitrum ، والنقر فوق "Confirm Transfer" لإعادة محاولة المعاملة. 
+ +![أرسل إشارة إلى L2](/img/L2TransferToolsFinalCurationImage.png) + +## Withdrawing your curation on L1 + +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. diff --git a/website/pages/ar/billing.mdx b/website/pages/ar/billing.mdx index 3c21e5de1cdc..8c99b8b5126d 100644 --- a/website/pages/ar/billing.mdx +++ b/website/pages/ar/billing.mdx @@ -1,158 +1,158 @@ --- -title: Billing +title: الفوترة --- -> Invoices are generated on a weekly basis. +> يتم إصدار الفواتير على أساس أسبوعي. -There are two options for paying for your query fees: +يوجد خياران لدفع رسوم الاستعلام: -- [Paying with fiat currency with Banxa](#billing-with-banxa) -- [Paying with crypto wallet](#billing-on-arbitrum) +- [الدفع بالعملة الورقية مع Banxa](#billing-with-banxa) +- [الدفع بمحفظة التشفير](#billing-on-arbitrum) -## Billing with Banxa +## الفواتير مع Banxa -Banxa enables you to bypass the need for an exchange and pay for your query fees using the fiat currency of your choice. The fiat currency will be converted to GRT, added to your account balance on the billing contract, and used to pay for queries associated with your API keys. +يمكّنك Banxa من تجاوز الحاجة إلى صرف العملة ودفع رسوم الاستعلام باستخدام العملة الورقية التي تختارها. سيتم تحويل العملة الورقية إلى GRT ، وإضافتها إلى رصيد حسابك في عقد الفوترة ، واستخدامها للدفع مقابل الاستفسارات المرتبطة بمفاتيح API الخاصة بك. -There may be KYC requirements depending on the regulations in your country. For more information about KYC, please visit [Banxa's FAQ page](https://docs.banxa.com/docs/faqs). +قد تكون هناك متطلبات تعرف على عميلك (KYC) بناءً على اللوائح المعمول بها في بلدك. لمزيد من المعلومات حول KYC ، يرجى زيارة [ صفحة الأسئلة الشائعة في Banxa ](https://docs.banxa.com/docs/faqs). 
-You can learn more about Banxa by reading their [documentation](https://docs.banxa.com/docs). +يمكنك معرفة المزيد حول Banxa من خلال قراءة [ وثائقهم ](https://docs.banxa.com/docs). -### Paying for query fees with Banxa +### دفع رسوم الاستعلام مع Banxa -1. Select “Pay with Card” option in [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit). -2. Enter the amount of GRT to be added to your account balance. -3. Click the 'Continue with Banxa' button. +1. حدد خيار "الدفع بالبطاقة" في [ Subgraph Studio ](https://thegraph.com/studio/billing/؟show=Deposit). +2. أدخل مبلغ GRT لإضافته إلى رصيد حسابك. +3. انقر فوق الزر "متابعة مع Banxa". 4. Enter necessary banking information on Banxa including payment method & fiat currency of choice. -5. Finish the transaction. +5. قم بإنهاء المعاملة. -It may take up to 10 minutes to complete the transaction. Once the transaction is confirmed, the purchased GRT will automatically be added to your account balance on Arbitrum. +قد يستغرق الأمر ما يصل إلى 10 دقائق لإكمال المعاملة. بمجرد تأكيد المعاملة ، ستتم إضافة GRT المشتراة تلقائيًا إلى رصيد حسابك على Arbitrum. -## Billing on Arbitrum +## الفوترة على Arbitrum -While The Graph protocol operates on Ethereum Mainnet, [the billing contract](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) lives on the [Arbitrum](https://arbitrum.io/) network to reduce transaction times and cost. You'll be required to pay the query fees generated from your API keys. Using the billing contract, you'll be able to: +بينما يعمل بروتوكول TheGraph على Ethereum Mainnet ، [يوجد عقد الفوترة ](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) على [ Arbitrum ](https://arbitrum.io/ شبكة) لتقليل أوقات المعاملات وتكلفتها. ستحتاج إلى دفع رسوم الاستعلامات الناتجة عن مفاتيح API الخاصة بك. باستخدام عقد الفوترة ، ستتمكن من: -- Add and withdraw GRT from your account balance. 
-- Keep track of your balances based on how much GRT you have added to your account balance, how much you have removed, and your invoices. -- Automatically pay invoices based on query fees generated, as long as there is enough GRT in your account balance. +- إضافة وسحب GRT من رصيد حسابك. +- تتبع أرصدتك بناءً على مقدار GRT الذي أضفته إلى رصيد حسابك ، والمبلغ الذي قمت بإزالته ، وفواتيرك. +- دفع الفواتير تلقائيًا بناءً على رسوم الاستعلام التي تم إنشاؤها ، طالما أن هناك ما يكفي من GRT في رصيد حسابك. -### Adding GRT using a crypto wallet +### إضافة GRT باستخدام محفظة تشفير -> This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +> تمت كتابة هذا القسم بافتراض أن لديك بالفعل GRT في محفظتك المشفرة ، وأنت على شبكة Ethereum mainnet. إذا لم يكن لديك GRT ، فيمكنك التعرف على كيفية الحصول على GRT [ هنا ](#getting-grt). -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +1. انتقل إلى [ صفحة فوترة Subgraph Studio ](https://thegraph.com/studio/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". +2. انقر على زر "توصيل المحفظة" في الزاوية اليمنى العليا من الصفحة. ستتم إعادة توجيهك إلى صفحة اختيار المحفظة. حدد محفظتك وانقر على "توصيل". -3. Click the 'Add GRT' button at the center of the page. A side panel will appear. +3. انقر فوق زر "إضافة GRT" في منتصف الصفحة. ستظهر لوحة جانبية. -4. Enter the amount of GRT you want to add to your account balance. You can also select the maximum amount of GRT you want to add to your account balance by clicking on the "Max" button. +4. أدخل مبلغ GRT الذي تريد إضافته إلى رصيد حسابك. يمكنك أيضًا تحديد الحد الأقصى لمبلغ GRT الذي تريد إضافته إلى رصيد حسابك بالنقر فوق الزر "Max". -5. Click 'Allow GRT Access' to allow the Subgraph Studio to access your GRT. 
Sign the associated transaction in your wallet. This will not cost any gas. +5. انقر فوق "السماح بالوصول إلى GRT" للسماح لـ Subgraph Studio بالوصول إلى GRT الخاص بك. قم بتوقيع العملية المرتبطة في محفظتك. هذا لن يكلف أي غاز. -6. Click 'Add GRT to account balance' to add the GRT to your account balance. Sign the associated transaction in your wallet. This will cost gas. +6. انقر فوق "إضافة GRT إلى رصيد الحساب" لإضافة GRT إلى رصيد حسابك. قم بتوقيع المعاملة المرتبطة في محفظتك. هذا سيكلف الغاز. -7. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. +7. بمجرد تأكيد المعاملة ، سترى GRT مضافًا إلى رصيد حسابك في غضون ساعة. -### Withdrawing GRT using a crypto wallet +### سحب GRT باستخدام محفظة تشفير -> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on the Arbitrum network. +> تمت كتابة هذا القسم بافتراض أنك أودعت GRT في رصيد حسابك على [ Subgraph Studio ](https://thegraph.com/studio/billing/) وأنك على شبكة Arbitrum. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +1. انتقل إلى [ صفحة فوترة Subgraph Studio](https://thegraph.com/studio/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". +2. انقر على زر "توصيل المحفظة" في الزاوية اليمنى العليا من الصفحة. حدد محفظتك وانقر على "توصيل". -3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. +3. انقر فوق القائمة المنسدلة بجوار زر "إضافة GRT" في منتصف الصفحة. حدد سحب GRT. ستظهر لوحة جانبية. -4. Enter the amount of GRT you would like to withdraw. +4. أدخل مبلغ GRT الذي ترغب في سحبه. -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet. +5. 
انقر فوق "سحب GRT" لسحب GRT من رصيد حسابك. قم بتوقيع المعاملة المرتبطة في محفظتك. هذا سيكلف الغاز. سيتم إرسال GRT إلى محفظة Arbitrum الخاصة بك. -6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet. +6. بمجرد تأكيد العملية ، سترى أن GRT قد تم سحبه من رصيد حسابك في محفظة Arbitrum الخاصة بك. -### Adding GRT using a multisig wallet +### إضافة GRT باستخدام محفظة متعددة التوقيع (multisig wallet) -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +1. انتقل إلى [ صفحة فوترة Subgraph Studio](https://thegraph.com/studio/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. +2. انقر على زر "توصيل المحفظة " في الزاوية اليمنى العليا من الصفحة. حدد محفظتك وانقر على "توصيل". إذا كنت تستخدم [ Gnosis-Safe ](https://gnosis-safe.io/) ، فستتمكن من توصيل multisig بالإضافة إلى محفظة التوقيع الخاصة بك. ثم قم بتوقيع الرسالة المرتبطة. هذا لن يكلف أي غاز. -3. Click the 'Add GRT' button at the center of the page. A side panel will appear. +3. انقر فوق زر "إضافة GRT" في منتصف الصفحة. ستظهر لوحة جانبية. -4. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. +4. بمجرد تأكيد المعاملة ، سترى GRT مضافًا إلى رصيد حسابك في غضون ساعة. -### Withdrawing GRT using a multisig wallet +### سحب GRT باستخدام محفظة multisig -> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on Ethereum mainnet. +> تمت كتابة هذا القسم بافتراض أنك أودعت GRT في رصيد حسابك على [ Subgraph Studio ](https://thegraph.com/studio/billing/) وأنك تستخدم Ethereum mainnet. -1. 
Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +1. انتقل إلى [ صفحة فوترة Subgraph Studio](https://thegraph.com/studio/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". +2. انقر على زر "توصيل المحفظة" في الزاوية اليمنى العليا من الصفحة. حدد محفظتك وانقر على "توصيل". -3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. +3. انقر فوق القائمة المنسدلة بجوار زر "إضافة GRT" في منتصف الصفحة. حدد سحب GRT. ستظهر لوحة جانبية. -4. Enter the amount of GRT you would like to withdraw. Specify the receiving wallet which will receive the GRT from this transaction. The GRT will be sent to the receiving wallet on Arbitrum. +4. أدخل مبلغ GRT الذي ترغب في سحبه. حدد المحفظة المستلمة التي ستتلقى GRT من هذه المعاملة. سيتم إرسال GRT إلى المحفظة المستلمة على Arbitrum. -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. +5. انقر فوق "سحب GRT" لسحب GRT من رصيد حسابك. قم بتوقيع المعاملة المرتبطة في محفظتك. هذا سيكلف الغاز. -6. Once the transaction is confirmed, you'll see the GRT added to your Arbitrum wallet within an hour. +6. بمجرد تأكيد المعاملة ، سترى GRT مضافًا إلى محفظة Arbitrum الخاصة بك في غضون ساعة. -## Getting GRT +## الحصول على GRT This section will show you how to get GRT to pay for query fees. ### Coinbase -This will be a step by step guide for purchasing GRT on Coinbase. +سيكون هذا دليلًا تفصيليًا لشراء GRT على Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. 
You can do this by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select GRT. -5. Select the payment method. Select your preferred payment method. -6. Select the amount of GRT you want to purchase. -7. Review your purchase. Review your purchase and click "Buy GRT". -8. Confirm your purchase. Confirm your purchase and you will have successfully purchased GRT. -9. You can transfer the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - - To transfer the GRT to your crypto wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the GRT account. - - Enter the amount of GRT you want to send and the wallet address you want to send it to. - - Click "Continue" and confirm your transaction. -Please note that for larger purchase amounts, Coinbase may require you to wait 7-10 days before transferring the full amount to a crypto wallet. +1. انتقل إلى [ Coinbase ](https://www.coinbase.com/) وأنشئ حسابًا. +2. بمجرد إنشاء حساب ، ستحتاج إلى التحقق من هويتك من خلال عملية تعرف على العميل المعروفة باسم KYC. هذه إجرائات روتينية لجميع منصات تداول العملات المشفرة المركزية أو المحافظ الخاصة. +3. بمجرد التحقق من هويتك ، يمكنك شراء GRT. يمكنك القيام بذلك عن طريق النقر فوق زر "شراء / بيع" في أعلى يمين الصفحة. +4. حدد العملة التي ترغب في شرائها. حدد GRT. +5. حدد طريقة الدفع. حدد طريقة الدفع المفضلة لديك. +6. حدد مبلغ GRT الذي تريد شراءه. +7. يرجى مراجعة عملية الشراء الخاصة بك. قم بمراجعة عملية الشراء وانقر على "شراء GRT". +8. قم بتأكيد الشراء. قم بتأكيد الشراء وستكون قد اشتريت GRT بنجاح. +9. يمكنك نقل GRT من حسابك إلى محفظة التشفير مثل [ MetaMask ](https://metamask.io/). + - لنقل GRT إلى محفظة التشفير الخاصة بك ، انقر فوق زر "حسابات" في أعلى يمين الصفحة. + - انقر فوق زر "إرسال" الموجود بجوار حساب GRT. + - أدخل مبلغ GRT الذي تريد إرساله وعنوان المحفظة الذي تريد الإرسال إليه. + - انقر على "متابعة" وقم بتأكيد معاملتك. 
-يرجى ملاحظة أنه بالنسبة لمبالغ الشراء الكبيرة ، قد يطلب منك Coinbase الانتظار من 7 إلى 10 أيام قبل تحويل المبلغ بالكامل إلى محفظة تشفير. -You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +يمكنك معرفة المزيد حول الحصول على GRT على Coinbase [ هنا ](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i- buy-digital-currency). ### Binance -This will be a step by step guide for purchasing GRT on Binance. +سيكون هذا دليلًا تفصيليًا لشراء GRT على Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner. -4. You will be taken to a page where you can select the currency you want to purchase. Select GRT. -5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more. -6. Select the amount of GRT you want to purchase. -7. Review your purchase and click "Buy GRT". -8. Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet. -9. You can withdraw the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your crypto wallet, add your crypto wallet's address to the withdrawel whitelist. - - Click on the "wallet" button, click withdraw, and select GRT. - - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. 
- - Click "Continue" and confirm your transaction. +1. انتقل إلى [ Binance ](https://www.binance.com/en) وأنشئ حسابًا. +2. بمجرد إنشاء حساب ، ستحتاج إلى التحقق من هويتك من خلال عملية تعرف باسم KYC (أو اعرف عميلك). هذا إجراء روتيني لجميع المنصات المركزية أو المحافظ الخاصه. +3. بمجرد التحقق من هويتك ، يمكنك شراء GRT. يمكنك القيام بذلك عن طريق النقر فوق زر "اشترِ الآن" الموجود على في الصفحة الرئيسية. +4. سيتم نقلك إلى صفحة حيث يمكنك تحديد العملة التي تريد شرائها. حدد GRT. +5. حدد طريقة الدفع المفضلة لديك. ستتمكن من الدفع بعملات ورقية مختلفة مثل اليورو والدولار الأمريكي والمزيد. +6. حدد كمية GRT الذي تريد شراءه. +7. راجع عملية الشراء وانقر على "شراء GRT". +8. قم بتأكيد عملية الشراء وستتمكن من رؤية GRT الخاص بك في محفظة Binance Spot الخاصة بك. +9. يمكنك سحب GRT من حسابك إلى محفظتك المشفرة مثل [ MetaMask ](https://metamask.io/). + - [ لسحب ](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) GRT إلى محفظتك الرقمية ، أضف عنوان محفظتك الرقمية إلى القائمة البيضاء للسحب. + - انقر فوق زر "المحفظة" ، وانقر فوق سحب ، ثم أختار GRT. + - أدخل كمية GRT الذي تريد إرساله وعنوان المحفظة الموجودة في القائمة البيضاء الذي تريد إرساله إليه. + - انقر على "متابعة" وقم بتأكيد معاملتك. -You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +يمكنك معرفة المزيد حول الحصول على GRT على Binance [ هنا ](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -This is how you can purchase GRT on Uniswap. +هذه هي الطريقة التي يمكنك بها شراء GRT على Uniswap. -1. Go to [Uniswap](https://app.uniswap.org/#/swap) and connect your wallet. -2. Select the token you want to swap from. Select ETH. -3. Select the token you want to swap to. Select GRT. - - Make sure you're swapping for the correct token. 
The GRT smart contract address is: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` -4. Enter the amount of ETH you want to swap. -5. Click "Swap". -6. Confirm the transaction in your wallet and you wait for the transaction to process. +1. انتقل إلى [ Uniswap ](https://app.uniswap.org/#/swap) وقم بتوصيل محفظتك. +2. حدد التوكن الذي ترغب في استبداله. حدد ETH. +3. حدد التوكن الذي ترغب في تبديله. حدد GRT. + - تأكد من تبديل التوكن الصحيح. عنوان العقد الذكي GRT هو: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` +4. الرجاء إدخال كمية ETH التي ترغب في تحويلها. +5. انقر على زر "مبادلة". +6. قم بتأكيد المعاملة في محفظتك وانتظر حتى تتم المعالجة. -You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +يمكنك التعرف على المزيد حول الحصول على GRT على Uniswap [ هنا ](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). -## Arbitrum Bridge +## جسر Arbitrum -The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +تم تصميم عقد الفوترة فقط لربط GRT من شبكة Ethereum mainnet إلى شبكة Arbitrum. إذا كنت ترغب في نقل GRT من Arbitrum مرة أخرى إلى Ethereum mainnet ، فستحتاج إلى استخدام [ Arbitrum Bridge ](https://bridge.arbitrum.io/؟l2ChainId=42161). diff --git a/website/pages/ar/cookbook/base-testnet.mdx b/website/pages/ar/cookbook/base-testnet.mdx index e32996e543ee..89c026c90979 100644 --- a/website/pages/ar/cookbook/base-testnet.mdx +++ b/website/pages/ar/cookbook/base-testnet.mdx @@ -11,7 +11,7 @@ What you'll need: ## Subgraph Studio -### 1. Install the Graph CLI +### 1. قم بتثبيت Graph CLI The Graph CLI (>=v0.41.0) is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. 
diff --git a/website/pages/ar/cookbook/substreams-powered-subgraphs.mdx b/website/pages/ar/cookbook/substreams-powered-subgraphs.mdx new file mode 100644 index 000000000000..8439209fd6f9 --- /dev/null +++ b/website/pages/ar/cookbook/substreams-powered-subgraphs.mdx @@ -0,0 +1,227 @@ +--- +title: Substreams-powered subgraphs +--- + +[Substreams](/substreams/README) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A Substreams module can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. + +## Requirements + +This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): + +``` +npm install -g @graphprotocol/graph-cli +``` + +## Get the cookbook + +> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). + +``` +graph init --from-example substreams-powered-subgraph +``` + +## Defining a Substreams package + +A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams/README), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples.
+ +The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): + +```proto +syntax = "proto3"; + +package example; + +message Contracts { + repeated Contract contracts = 1; +} + +message Contract { + string address = 1; + uint64 blockNumber = 2; + string timestamp = 3; + uint64 ordinal = 4; +} +``` + +The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: + +``` +#[substreams::handlers::map] +fn map_contract(block: eth::v2::Block) -> Result { + let contracts = block + .transactions() + .flat_map(|tx| { + tx.calls + .iter() + .filter(|call| !call.state_reverted) + .filter(|call| call.call_type == eth::v2::CallType::Create as i32) + .map(|call| Contract { + address: format!("0x{}", Hex(&call.address)), + block_number: block.number, + timestamp: block.timestamp_seconds().to_string(), + ordinal: tx.begin_ordinal, + }) + }) + .collect(); + Ok(Contracts { contracts }) +} +``` + +A Substreams package can be used by a subgraph as long as it has a module which outputs compatible entity changes. The example Substreams package has an additional `graph_out` module in `lib.rs` which returns a `substreams_entity_change::pb::entity::EntityChanges` output, which can be processed by Graph Node. + +> The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. 
+ +``` +#[substreams::handlers::map] +pub fn graph_out(contracts: Contracts) -> Result { + // hash map of name to a table + let mut tables = Tables::new(); + + for contract in contracts.contracts.into_iter() { + tables + .create_row("Contract", contract.address) + .set("timestamp", contract.timestamp) + .set("blockNumber", contract.block_number); + } + + Ok(tables.to_entity_changes()) +} +``` + +These types and modules are pulled together in `substreams.yaml`: + +``` +specVersion: v0.1.0 +package: + name: 'substreams_test' # the name to be used in the .spkg + version: v1.0.1 # the version to use when creating the .spkg + +imports: # dependencies + entity: https://github.com/streamingfast/substreams-entity-change/releases/download/v0.2.1/substreams-entity-change-v0.2.1.spkg + +protobuf: # specifies custom types for use by Substreams modules + files: + - example.proto + importPaths: + - ./proto + +binaries: + default: + type: wasm/rust-v1 + file: ./target/wasm32-unknown-unknown/release/substreams.wasm + +modules: # specify modules with their inputs and outputs. 
+ - name: map_contract + kind: map + inputs: + - source: sf.ethereum.type.v2.Block + output: + type: proto:test.Contracts + + - name: graph_out + kind: map + inputs: + - map: map_contract + output: + type: proto:substreams.entity.v1.EntityChanges # this type can be consumed by Graph Node + +``` + +You can check the overall "flow" from a Block, to `map_contract` to `graph_out` by running `substreams graph`: + +```mermaid +graph TD; + map_contract[map: map_contract]; + sf.ethereum.type.v2.Block[source: sf.ethereum.type.v2.Block] --> map_contract; + graph_out[map: graph_out]; + map_contract --> graph_out; +``` + +To prepare this Substreams package for consumption by a subgraph, you must run the following commands: + +```bash +yarn substreams:protogen # generates types in /src/pb +yarn substreams:build # builds the substreams +yarn substreams:package # packages the substreams in a .spkg file + +# alternatively, yarn substreams:prepare calls all of the above commands +``` + +> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands + +This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. + +> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. + +## Defining a Substreams-powered subgraph + +Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. + +This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `graph_out`, from the Substreams package above).
The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. + +> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). + +```yaml +specVersion: 0.0.4 +description: Ethereum Contract Tracking Subgraph (powered by Substreams) +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: schema.graphql +dataSources: + - kind: substreams + name: substream_test + network: mainnet + source: + package: + moduleName: graph_out + file: substreams-test-v1.0.1.spkg + mapping: + kind: substreams/graph-entities + apiVersion: 0.0.5 +``` + +The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. + +```graphql +type Contract @entity { + id: ID! + + "The timestamp when the contract was deployed" + timestamp: String! + + "The block number of the contract deployment" + blockNumber: BigInt! +} +``` + +Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. + +> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). + +```bash +yarn install # install graph-cli +yarn subgraph:build # build the subgraph +yarn subgraph:deploy # deploy the subgraph +``` + +That's it! You have built and deployed a Substreams-powered subgraph. + +## Serving Substreams-powered subgraphs + +In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. 
These providers can be configured via a `config.toml` file: + +```toml +[chains.mainnet] +shard = "main" +protocol = "ethereum" +provider = [ + { label = "substreams-provider-mainnet", + details = { type = "substreams", + url = "https://mainnet-substreams-url.grpc.substreams.io/", + token = "exampletokenhere" }}, + { label = "firehose-provider-mainnet", + details = { type = "firehose", + url = "https://mainnet-firehose-url.grpc.firehose.io/", + token = "exampletokenhere" }}, +] +``` diff --git a/website/pages/ar/cookbook/upgrading-a-subgraph.mdx b/website/pages/ar/cookbook/upgrading-a-subgraph.mdx new file mode 100644 index 000000000000..378467289f6a --- /dev/null +++ b/website/pages/ar/cookbook/upgrading-a-subgraph.mdx @@ -0,0 +1,225 @@ +--- +title: Upgrading an Existing Subgraph to The Graph Network +--- + +## مقدمة + +This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! + +The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. + +### المتطلبات الأساسية + +- You have already deployed a subgraph on the hosted service. +- القراف الفرعي يقوم بفهرسة سلسلة متوفرة (أو متوفرة في النسخة التجريبية) على شبكة القراف. +- You have a wallet with ETH to publish your subgraph on-chain. +- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. + +## Upgrading an Existing Subgraph to The Graph Network + +> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). + +1. احصل على أحدث إصدار من graph-cli المثبت: + +```sh +npm install -g @graphprotocol/graph-cli +``` + +```sh +yarn global add @graphprotocol/graph-cli +``` + +Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. 
+ +2. Inside the subgraph's main project repository, authenticate the subgraph to deploy and build on the studio: + +```sh +graph auth --studio +``` + +3. أنشئ الملفات وقم ببناء الـ subgraph: + +```sh +graph codegen && graph build +``` + +If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). + +4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. You can find your `` in the Studio UI, which is based on the name of your subgraph. + +```sh +graph deploy --studio +``` + +5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): + +```sh +{ + users(first: 5) { + id + liquidityPositions { + id + } + } + bundles(first: 5) { + id + ethPrice + } +} +``` + +6. At this point, your subgraph is now deployed on Subgraph Studio, but not yet published to the decentralized network. You can now test the subgraph to make sure it is working as intended using the temporary query URL as seen on top of the right column above. As this name already suggests, this is a temporary URL and should not be used in production. + +- Updating is just publishing another version of your existing subgraph on-chain. +- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. +- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet.
It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. + +7. Publish the subgraph on The Graph's decentralized network by hitting the "Publish" button. + +You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. + +And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). + +Feel free to leverage the [#Curators channel](https://discord.gg/rC8rBuRtbH) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. + +### Create an API key + +You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). + +![API key creation page](/img/api-image.png) + +At the end of each week, an invoice will be generated based on the query fees that have been incurred during this period. This invoice will be paid automatically using the GRT available in your balance. Your balance will be updated after the cost of your query fees are withdrawn. Query fees are paid in GRT via the Arbitrum network. You will need to add GRT to the Arbitrum billing contract to enable your API key via the following steps: + +- Purchase GRT on an exchange of your choice. +- Send the GRT to your wallet. +- On the Billing page in Studio, click on Add GRT. + +![Add GRT in billing](/img/Add-GRT-New-Page.png) + +- Follow the steps to add your GRT to your billing balance. 
+- Your GRT will be automatically bridged to the Arbitrum network and added to your billing balance. + +![Billing pane](/img/New-Billing-Pane.png) + +> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. + +### Securing your API key + +It is recommended that you secure the API by limiting its usage in two ways: + +1. Authorized Subgraphs +2. Authorized Domain + +You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). + +![Subgraph lockdown page](/img/subgraph-lockdown.png) + +### Querying your subgraph on the decentralized network + +Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. + +![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) + +As soon as the first Indexer has fully indexed your subgraph you can start to query the subgraph on the decentralized network. In order to retrieve the query URL for your subgraph, you can copy/paste it by clicking on the symbol next to the query URL. You will see something like this: + +`https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` + +Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. + +You can now use that Query URL in your dapp to send your GraphQL requests to. + +Congratulations! You are now a pioneer of decentralization! + +> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. 
In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: + +```graphql +{ + stakers(block: { number_gte: 14486109 }) { + id + } +} +``` + +More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). + +## Updating a Subgraph on the Network + +If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. + +1. Make changes to your current subgraph. A good idea is to test small fixes on the Subgraph Studio by publishing to Goerli. +2. انشر ما يلي وحدد الإصدار الجديد في الأمر (مثل v0.0.1 ، v0.0.2 ، إلخ): + +```sh +graph deploy --studio +``` + +3. اختبر الإصدار الجديد في Subgraph Studio من خلال الاستعلام في الـ playground +4. انشر الإصدار الجديد على شبكة The Graph. تذكر أن هذا يتطلب غاز (كما هو موضح في القسم أعلاه). + +### Owner Update Fee: Deep Dive + +> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). + +An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). + +The new bonding curve charges the 2.5% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph.
+ +كمثال على ذلك ، هذه هي الحالة فقط إذا كان الـ subgraph الخاص بك يتم تنسيقه بشكل نشط: + +- تتم الإشارة بـ 100،000 GRT باستخدام الترحيل التلقائي في v1 لـ subgraph +- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned +- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. + +_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ + +### الحفاظ على إصدار مستقر من Subgraph + +If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/rC8rBuRtbH) on Discord to let Indexers know when you're versioning your subgraphs. + +Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. + +### تحديث البيانات الوصفية (Metadata) لـ Subgraph + +يمكنك تحديث البيانات الوصفية لـ subgraphs الخاص بك دون الحاجة إلى نشر إصدار جديد. تتضمن البيانات الوصفية اسم الـ subgraph والصورة والوصف و URL لموقع الويب و URL كود المصدر والفئات. يمكن للمطورين القيام بذلك عن طريق تحديث تفاصيل الـ subgraph الخاصة بهم في Subgraph Studio حيث يمكنك تعديل جميع الحقول الملائمة. 
+ +Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. + +## أفضل الممارسات لنشر Subgraph على شبكة The Graph + +1. الاستفادة من اسم ENS لتطوير الـ Subgraph: + +- Set up your ENS [here](https://app.ens.domains/) +- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). + +2. كلما تم ملء البروفايل الخاص بك ، كلما زادت فرص فهرسة الـ subgraphs الخاصة بك وتنسيقها. + +## إيقاف Subgraph على شبكة The Graph + +Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. + +## الاستعلام عن Subgraph + الفوترة على شبكة The Graph + +The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. + +In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). + +### تقدير رسوم الاستعلام على الشبكة + +على الرغم من أن هذه ليست ميزة موجودة في واجهة مستخدم المنتج ، إلا أنه يمكنك تعيين الحد الأقصى لميزانيتك لكل استعلام وذلك عن طريق أخذ المبلغ الذي ترغب في دفعه شهريًا وتقسيمه على حجم الاستعلام المتوقع. + +While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. 
It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. + +Remember that it's a dynamic and growing market, but how you interact with it is in your control. There is no maximum or minimum price specified in the protocol or the Gateways. For example, you can look at the price paid by a few of the dapps on the network (on a per-week basis), below. See the last column, which shows query fees in GRT. + +![QueryFee](/img/QueryFee.png) + +## مصادر إضافية + +If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: + + + +- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) +- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around + - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` +- [Subgraph Studio documentation](/deploying/subgraph-studio) diff --git a/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx index 48dfa34da8f7..fbadfa128b57 100644 --- a/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx +++ b/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx @@ -12,15 +12,15 @@ Before using the Hosted Service, create an account in our Hosted Service. You wi ## Store the Access Token -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. +بعد إنشاء حساب ، انتقل إلى [لوحة القيادة](https://thegraph.com/hosted-service/dashboard). 
وانسخ رمز وصول المعروض على لوحة المعلومات وقم بتنفيذ `graph auth --product hosted-service `. سيؤدي هذا إلى تخزين رمز وصول على جهاز الكمبيوتر الخاص بك. تحتاج للقيام بذلك مرة واحدة ، أو إذا قمت بإعادة توليد رمز وصول. ## إنشاء Subgraph على الخدمة المستضافة (Hosted Service) -Before deploying the subgraph, you need to create it in The Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _'Add Subgraph'_ button and fill in the information below as appropriate: +قبل نشر الـ subgraph ، تحتاج إلى إنشائه في Graph Explorer. انتقل إلى [لوحة القيادة](https://thegraph.com/hosted-service/dashboard) وانقر على _'Add Subgraph'_ واملأ المعلومات أدناه حسب الحاجة: **Image** - اختر صورة لاستخدامها كصورة عرض وصورة مصغرة للـ subgraph. -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ +**Subgraph اسم** - بالاضافة لاسم الحساب الذي تم إنشاء الـ subgraph ضمنه، سيؤدي هذا أيضا إلى تعريف اسم النمط `account-name/subgraph-name` - المستخدم لعمليات النشر و GraphQL endpoints. _لا يمكن تغيير هذا الحقل لاحقا._ **Account** - الحساب الذي تم إنشاء الـ subgraph ضمنه. يمكن أن يكون هذا حساب فرد أو منظمة. _لا يمكن نقل الـ Subgraphs بين الحسابات لاحقا._ diff --git a/website/pages/ar/deploying/hosted-service.mdx b/website/pages/ar/deploying/hosted-service.mdx index 1093922c13a3..5870269c55d1 100644 --- a/website/pages/ar/deploying/hosted-service.mdx +++ b/website/pages/ar/deploying/hosted-service.mdx @@ -2,11 +2,11 @@ title: ما هي الخدمة المستضافة (Hosted Service)؟ --- -> Please note, the Hosted Service will begin sunsetting in Q1 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [migrate their subgraphs](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph) as more networks are supported. 
Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to migrate subgraphs to the decentralized network. Read more about the sunsetting of the Hosted Service [here](https://thegraph.com/blog/sunsetting-hosted-service). +> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). -This section will walk you through deploying a subgraph to the [Hosted Service](https://thegraph.com/hosted-service/). +This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). -If you don't have an account on the Hosted Service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The Hosted Service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. +If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). 
@@ -16,7 +16,7 @@ First follow the instructions [here](/developing/defining-a-subgraph) to install ### من عقد موجود -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the Hosted Service. +If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. يمكنك استخدام هذا الأمر لإنشاء subgraph يقوم بفهرسة جميع الأحداث من عقد موجود. هذا سيحاول جلب ABI العقد من [ Etherscan ](https://etherscan.io/). @@ -46,6 +46,6 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. -## الشبكات المدعومة على الـ Hosted Service +## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ar/developing/assemblyscript-api.mdx b/website/pages/ar/developing/assemblyscript-api.mdx index 39f33dc2fb7d..8f9e39792367 100644 --- a/website/pages/ar/developing/assemblyscript-api.mdx +++ b/website/pages/ar/developing/assemblyscript-api.mdx @@ -6,7 +6,7 @@ title: AssemblyScript API هذه الصفحة توثق APIs المضمنة التي يمكن استخدامها عند كتابة subgraph mappings. 
يتوفر نوعان من APIs خارج الصندوق: -- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- [مكتبة رسم بياني تيبسكريبت](https://github.com/graphprotocol/graph-ts)(`graph-ts`) - كود تم إنشاؤه من ملفات الـ subgraph بواسطة `graph codegen`. من الممكن أيضا إضافة مكتبات أخرى كـ dependencies، طالما أنها متوافقة مع [ AssemblyScript ](https://github.com/AssemblyScript/assemblyscript). نظرا لأنه تتم كتابة الـ mappings بهذه اللغة فإن [ AssemblyScript wiki ](https://github.com/AssemblyScript/assemblyscript/wiki) تعد مصدرا جيدا للغة ولميزات المكتبة القياسية. @@ -176,7 +176,7 @@ _رياضيات_ تحتوي فئة `TypedMap` على API التالية: -- `new TypedMap()` – ينشئ map فارغ بمفاتيح من النوع `K` وقيم من النوع `T` +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` - `map.set (key: K، value: V): void` - يضبط قيمة الـ `key` لـ `value` - `map.getEntry(key: K): TypedMapEntry | null` – يقوم بإرجاع زوج المفتاح-والقيمة لـ `key` أو `null` إذا كان الـ `key` غير موجود في الـ map - `map.get(key: K): V | null` – يقوم بإرجاع زوج المفتاح-والقيمة لـ `key` أو `null` إذا كان الـ `key` غير موجود في الـ map @@ -208,7 +208,7 @@ _العوامل_ - `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` - `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` -#### Address +#### العنوان ```typescript 'import { Address } from '@graphprotocol/graph-ts @@ -280,6 +280,51 @@ As the entity may not exist in the store yet, the `load` method returns a value > **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. 
+#### Looking up entities created within a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]!
@derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entity that the Holder entity was derived from +let token = holder.tokens.load() +``` + #### تحديث الكيانات الموجودة There are two ways to update an existing entity: @@ -668,14 +713,14 @@ The `crypto` API makes a cryptographic functions available for use in mappings. 'import { json, JSONValueKind } from '@graphprotocol/graph-ts ``` -JSON data can be parsed using the `json` API: +يمكن تحليل بيانات JSON باستخدام `json` API: - `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence - `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed - `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` - `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed -The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: +توفر فئة `JSONValue` طريقة لسحب القيم من مستند JSON عشوائي. نظرا لأن قيم JSON يمكن أن تكون منطقية وأرقاما ومصفوفات وغيرها، فإن `JSONValue` يأتي مع خاصية `kind` للتحقق من نوع القيمة: ```typescript let value = json.fromBytes(...) 
@@ -701,8 +746,8 @@ When the type of a value is certain, it can be converted to a [built-in type](#b | المصدر(المصادر) | الغاية | دالة التحويل | | -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | لا يوجد | -| Address | String | s.toHexString() | +| العنوان | Bytes | لا يوجد | +| العنوان | String | ()s.toHexString | | BigDecimal | String | s.toString() | | BigInt | BigDecimal | s.toBigDecimal() | | BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | @@ -731,14 +776,14 @@ When the type of a value is certain, it can be converted to a [built-in type](#b | JSON | string | s.toString() | | JSON | Array | s.toArray() | | JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | +| String | العنوان | Address.fromString(s) | +| Bytes | العنوان | Address.fromBytes(s) | | String | BigInt | BigInt.fromString(s) | | String | BigDecimal | BigDecimal.fromString(s) | | String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | | String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | -### Data Source Metadata +### البيانات الوصفية لمصدر البيانات You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: @@ -746,7 +791,7 @@ You can inspect the contract address, network and context of the data source tha - `dataSource.network(): string` - `dataSource.context(): DataSourceContext` -### Entity and DataSourceContext +### الكيان و DataSourceContext The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: diff --git a/website/pages/ar/developing/creating-a-subgraph.mdx b/website/pages/ar/developing/creating-a-subgraph.mdx index ffd8fedebdd9..6c14c2c2c543 100644 --- a/website/pages/ar/developing/creating-a-subgraph.mdx +++ b/website/pages/ar/developing/creating-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Creating a Subgraph +title: إنشاء 
subgraph --- A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. @@ -20,7 +20,7 @@ A subgraph extracts data from a blockchain, processing it and storing it so that ## قم بتثبيت Graph CLI -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. +تمت كتابة Graph CLI بلغة JavaScript ، وستحتاج إلى تثبيت إما `yarn` أو `npm` لاستخدامها ؛ ومن المفترض أن يكون لديك yarn كالتالي. بمجرد حصولك على `yarn` ، قم بتثبيت Graph CLI عن طريق تشغيل @@ -437,7 +437,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[إدارة الميزات](#experimental-features):** من `specVersion` `0.0.4` وما بعده ، يجب الإعلان عن `fullTextSearch` ضمن قسم `features` في subgraph manifest. ### اللغات المدعومة @@ -820,7 +820,7 @@ export function handleBlock(block: ethereum.Block): void { } ``` -## Anonymous Events +## الأحداث المجهولة If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: @@ -959,7 +959,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - يضيف أو يزيل الواجهات - يغير للكيانات التي يتم تنفيذ الواجهة لها -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[إدارة الميزات](#experimental-features):** يجب الإعلان عن `grafting` ضمن `features` في subgraph manifest. 
## File Data Sources @@ -975,7 +975,7 @@ This is similar to the [existing data source templates](https://thegraph.com/doc > This replaces the existing `ipfs.cat` API -### Migration guide +### Upgrade guide #### Update `graph-ts` and `graph-cli` diff --git a/website/pages/ar/developing/developer-faqs.mdx b/website/pages/ar/developing/developer-faqs.mdx index bc47d45d1d2d..da3006ee099a 100644 --- a/website/pages/ar/developing/developer-faqs.mdx +++ b/website/pages/ar/developing/developer-faqs.mdx @@ -127,16 +127,16 @@ Currently, the recommended approach for a dapp is to add the key to the frontend ## 25. Where do I go to find my current subgraph on the Hosted Service? -Head over to the Hosted Service in order to find subgraphs that you or others deployed to the Hosted Service. You can find it [here](https://thegraph.com/hosted-service). +Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). ## 26. Will the Hosted Service start charging query fees? -لن يقوم TheGraph بفرض رسوم على Hosted Service أبدًا. و TheGraph عبارة عن بروتوكول لامركزي ، ولا يتماشى فرض رسوم على خدمة مركزية مع قيم TheGraph. وال Hosted Service خطوة مؤقتة للمساعدة في الوصول إلى الشبكة اللامركزية. وسيكون لدى المطورين وقت كافٍ للانتقال إلى الشبكة اللامركزية. +The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. ## 27. When will the Hosted Service be shut down? -The Hosted Service will shut down in Q1 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). 
All dapps using the Hosted Service are encouraged to migrate to the decentralized network. Migration Grants are available for developers to help migrate their subgraph. If your dapp is migrating a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). +The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). -## 28. How do I upgrade a subgraph on mainnet? +## 28. How do I update a subgraph on mainnet? -If you’re a subgraph developer, you can upgrade a new version of your subgraph to the Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/ar/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/ar/developing/substreams-powered-subgraphs-faq.mdx new file mode 100644 index 000000000000..854850b53954 --- /dev/null +++ b/website/pages/ar/developing/substreams-powered-subgraphs-faq.mdx @@ -0,0 +1,91 @@ +--- +title: Substreams-powered subgraphs FAQ +--- + +## What are Substreams? 
+ +Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](/substreams/developers-guide/sink-targets/README/#substreams-sinks-overview) their data anywhere. + +Go to the [Substreams Documentation](/substreams/README/) to learn more about Substreams. + +## What are Substreams-powered subgraphs? + +[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. + +If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. + +## How are Substreams-powered subgraphs different from subgraphs? + +Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. 
+ +By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. + +## What are the benefits of using Substreams-powered subgraphs? + +Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](/substreams/developers-guide/creating-your-manifest/#module-definitions) to output to different [sinks](/substreams/developers-guide/sink-targets/#substreams-sinks-overview) such as PostgreSQL, MongoDB, and Kafka. + +## What are the benefits of Substreams? + +There are many benefits to using Substreams, including: + +- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. + +- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). + +- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. + +- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. + +- Access to additional data which is not available as part of the JSON RPC + +- All the benefits of the Firehose. + +## What is the Firehose? + +Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. 
Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. + +Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. + +## What are the benefits of the Firehose? + +There are many benefits to using Firehose, including: + +- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. + +- Prevents downtimes: Designed from the ground up for High Availability. + +- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. + +- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. + +- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. + +## Where can developers access more information about Substreams-powered subgraphs and Substreams? + +The [Substreams documentation](/substreams/README/) will teach you how to build Substreams modules. + +The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. + +## What is the role of Rust modules in Substreams? + +Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. + +See [modules documentation](/substreams/developers-guide/modules/types/) for details. + +## What makes Substreams composable? + +When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. 
+ +As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. + +## How can you build and deploy a Substreams-powered Subgraph? + +After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). + +## Where can I find examples of Substreams and Substreams-powered subgraphs? + +You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. + +## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? + +The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. diff --git a/website/pages/ar/developing/supported-networks.mdx b/website/pages/ar/developing/supported-networks.mdx index 6ced97c42dea..9859facd754c 100644 --- a/website/pages/ar/developing/supported-networks.mdx +++ b/website/pages/ar/developing/supported-networks.mdx @@ -2,73 +2,47 @@ title: الشبكات المدعومة --- -## The Graph's Decentralized Network - -The following networks are supported on The Graph's Decentralized Network: - -- `mainnet` (Ethereum) -- `gnosis`\* -- `celo`\* -- `avalanche`\* -- `arbitrum-one`\* +import { getSupportedNetworks } from '@/src/getSupportedNetworks' + + + + + + + + + + + + {getSupportedNetworks().map((network) => ( + + + + + + + + + + ))} +
NetworkCLI NameChain IDالخدمة المستضافةSubgraph StudioDecentralized NetworkSubstreams Support
{network.name} + {network.cliName} + {network.chainId}{network.supportedOnHosted ? '✓' : null}{network.supportedOnStudio ? '✓' : null}{network.supportedOnNetwork ? `✓${network.isBeta ? '*' : ''}` : null}{network.substreams ? '✓' : null}
\*In beta. -## الخدمة المستضافة - -> The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. - -The following networks are supported in beta on the Hosted Service: - -- `goerli` -- `poa-core` -- `poa-sokol` -- `matic` (now known as Polygon) -- `mumbai` -- `fantom` -- `fantom-testnet` -- `bsc` (now known as BNB Chain) -- `chapel` -- `clover` -- `fuji` -- `fuse` -- `moonriver` -- `moonbeam` -- `mbase` -- `optimism` -- `optimism-goerli` -- `aurora` -- `aurora-testnet` -- `boba` -- `harmony` -- `zkSync2-testnet` -- `osmosis-1` -- `base-testnet` - -### Near - -- `near-mainnet` -- `near-testnet` - -### Cosmos - -- `cosmoshub-4` -- `theta-testnet-001` (this is the current Cosmos Hub testnet) - -### Arweave - -- `arweave-mainnet` - -You will **not be able** to publish a subgraph that indexes a non-mainnet network to the decentralized Graph Network in [Subgraph Studio](/deploying/subgraph-studio). +The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. -For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/networks/mainnet.md#feature-support). 
+For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). + +Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. ## Graph Node -If your preferred network isn't support on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/ar/developing/unit-testing-framework.mdx b/website/pages/ar/developing/unit-testing-framework.mdx index 2fb0abe979a2..54b83b009125 100644 --- a/website/pages/ar/developing/unit-testing-framework.mdx +++ b/website/pages/ar/developing/unit-testing-framework.mdx @@ -1,8 +1,8 @@ --- -title: Unit Testing Framework +title: اختبار وحدة Framework --- -Matchstick is a unit testing framework, developed by [LimeChain](https://limechain.tech/), that enables subgraph developers to test their mapping logic in a sandboxed environment and deploy their subgraphs with confidence! +Matchstick هو اختبار وحدة framework ، تم تطويره بواسطة [ LimeChain ](https://limechain.tech/) ، والذي يسمح لمطوري الـ subgraph من اختبار منطق الـ mapping في بيئة sandboxed ونشر الـ subgraphs الخاصة بهم بثقة! 
## Getting Started @@ -105,7 +105,7 @@ graph test path/to/file.test.ts ```sh -c, --coverage Run the tests in coverage mode -d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) --f --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. -h, --help Show usage information -l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) -r, --recompile Forces tests to be recompiled @@ -990,9 +990,9 @@ Notice that dataSourceMock.resetValues() is called at the end. That's because th ## Test Coverage -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The tool only works on **Linux** and **MacOS**, but when we add support for Docker (see progress on that [here](https://github.com/LimeChain/matchstick/issues/222)) users should be able to use it on any machine and almost any OS. +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -The test coverage tool is really simple - it takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have actually been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. 
Since code coverage (and testing as a whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked. ### المتطلبات الأساسية @@ -1029,7 +1029,7 @@ You could also add a custom `coverage` command to your `package.json` file, like }, ``` -Hopefully that should execute the coverage tool without any issues. You should see something like this in the terminal: +That will execute the coverage tool and you should see something like this in the terminal: ```sh $ graph test -c diff --git a/website/pages/ar/glossary.mdx b/website/pages/ar/glossary.mdx index 2a7c0b281197..2e840513f1ea 100644 --- a/website/pages/ar/glossary.mdx +++ b/website/pages/ar/glossary.mdx @@ -8,6 +8,8 @@ title: Glossary - **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. + + - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. - **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. @@ -77,3 +79,11 @@ title: Glossary - **Graph CLI**: A command line interface tool for building and deploying to The Graph. 
- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. + +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. + +- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. + +- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. + +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/ar/mips-faqs.mdx b/website/pages/ar/mips-faqs.mdx index c1a1c8aa64b5..97c4cd412c37 100644 --- a/website/pages/ar/mips-faqs.mdx +++ b/website/pages/ar/mips-faqs.mdx @@ -1,5 +1,5 @@ --- -title: MIPs FAQs +title: الاسئلة الشائعة حول MIPs --- ## مقدمة @@ -10,23 +10,23 @@ To support the sunsetting of the hosted service and the migration of all of it's The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to migration grants for subgraph developers using multi-chain subgraphs. +The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
### Useful Resources - [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +- [كيف تصبح مفهرسًا فعالًا على شبكة الغراف](https://thegraph.com/blog/how-to-become-indexer/) +- [مركز معرفة المفهرس: ستجد هناك معلومات حول المفهرسين](https://thegraph.academy/indexers/) +- [مُحسِّن التخصيص](https://github.com/graphprotocol/allocationopt.jl) +- [أدوات تحسين التخصيص](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) ### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -Yes, it is indeed. +نعم ، يمكنك ذلك. For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). +قام أحد أعضاء المجتمع ، ويدعى [SunTzu](https://github.com/suntzu93) ، بإنشاء نص برمجي يقوم بتنفيذ هذه العملية وفقًا لمنهجية مخطط التحكيم. يمكنك الاطلاع عليها من [here](https://github.com/suntzu93/get_valid_poi_subgraph). ### 2. Which chain will the MIPs program incentivise first? 
diff --git a/website/pages/ar/network-transition-faq.mdx b/website/pages/ar/network-transition-faq.mdx index 22e6e6f38c79..026af04c6e81 100644 --- a/website/pages/ar/network-transition-faq.mdx +++ b/website/pages/ar/network-transition-faq.mdx @@ -1,90 +1,90 @@ --- -title: Network Transition FAQ +title: الأسئلة الشائعة حول نقل الشبكة --- Developers will have plenty of time to migrate their subgraphs to the decentralized network. Exact timelines will vary from network to network based on Indexer and network readiness-the hosted service will not end support for all networks at once and will not be sunset abruptly. -Each network on the hosted service, including Ethereum, will sunset gradually as it is supported on the decentralized network to achieve feature parity and a high quality of service. This will happen on a network-to-network basis with help from Indexers in the [MIPs program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program/), to enable full support for each network on the decentralized network. +سيتم إنهاء كل شبكة على الخدمة المستضافة تدريجيًا ، بما في ذلك Ethereum ، حيث يتم دعمها على الشبكة اللامركزية لتحقيق جودة خدمة عالية. سيتم تطبيق ذلك على كل شبكة بمساعدة المفهرسين في [برنامج MIPs](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program/)، لتحقيق الدعم الكامل لكل شبكة على الشبكة اللامركزية. -To add more clarity around continued support for each network on the hosted service, these FAQs answer common questions regarding the specifics of the network transition process. If you would like to start the subgraph migration process now, here is a [step-by-step guide](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph). To skip to the migration FAQ, [click here](#migration-faqs). +لمزيد من التفاصيل حول الدعم المستمر لكل شبكة على الخدمة المستضافة ، فهذه الأسئلة الشائعة تجيب على الأسئلة المتعلقة بتفاصيل عملية نقل الشبكة. 
إذا كنت ترغب في بدء عملية ترحيل الرسم البياني الفرعي الآن ، فإليك [ دليل خطوة بخطوة ](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph). لتخطي الأسئلة الشائعة المتعلقة بالترحيل ، [ انقر هنا ](#migration-faqs). ## Hosted Service Sunset FAQs -### Will I have to migrate my subgraph before the decentralized network serves core functionalities for subgraphs? +### هل سيتطلب مني ترحيل الـ subgraph الخاص بي قبل أن يتم تقديم الوظائف الأساسية للـ subgraphs على الشبكة اللامركزية؟ -Subgraph developers can begin migrating their Ethereum mainnet subgraphs now, but will not be forced to migrate subgraphs to the network before feature core functionality exists for the decentralized network and hosted service. Migration of Gnosis network subgraphs will also begin soon, with other networks to follow once Indexers have tested the networks and are ready to index them in production. +يمكن لمطوري الرسم البياني الفرعي البدء في ترحيل الرسوم البيانية الفرعية لشبكة Ethereum الرئيسية الخاصة بهم الآن ، ولكن لن يتم إجبارهم على ترحيل الرسوم البيانية الفرعية إلى الشبكة قبل وجود وظائف أساسية للشبكة اللامركزية والخدمة المستضافة. ستبدأ أيضًا عملية ترحيل الرسوم البيانية الفرعية لشبكة Gnosis قريبًا ، وستتبعها شبكات أخرى بمجرد أن يختبر المفهرسون الشبكات وتصبح جاهزة لفهرستها. -### What is the timeline and process for deprecating the hosted service? +### ما هو الإجراء المتبع لإيقاف الخدمة المستضافة والمخطط الزمني له؟ -All networks will have their own timelines, depending on when they are enabled on the network and the timeline it takes to get through each phase. Core developers are working to migrate the majority of hosted service traffic to the decentralized network as soon as possible. +لكل شبكة جدول زمني خاصة بها ، وذلك اعتمادًا على وقت تمكينها على الشبكة والجدول الزمني الذي تستغرقه عبر كل مرحلة. يعمل المطورون الأساسيون على ترحيل غالبية الخدمة المستضافة إلى الشبكة اللامركزية في أسرع وقت ممكن. 
-Most importantly, you will not lose access to the hosted service before core functionality is available for your specific network/subgraph on the decentralized network. +الأهم من ذلك ، أنك لن تفقد الوصول إلى الخدمة المستضافة قبل أن تتوفر الوظيفة الأساسية للشبكة / الرسم البياني الفرعي الخاص بك على الشبكة اللامركزية. -The three distinct phases of hosted service deprecation for each network are: +المراحل الثلاث لإيقاف الخدمة المستضافة لكل شبكة هي: -#### Phase 1 (The Sunray): Disable new subgraph creation for blockchains that have quality parity on the network +#### المرحلة 1 (The Sunray): تعطيل إمكانية إنشاء الرسم البياني الفرعي للبلوكتشاين الذي له تكافؤ في الجودة على الشبكة -In this stage, developers will no longer be able to deploy new subgraphs to the hosted service for that network. Developers will still be able to upgrade existing subgraphs on the hosted service. +In this stage, developers will no longer be able to deploy new subgraphs to the hosted service for that network. Developers will still be able to update existing subgraphs on the hosted service. -No network has yet begun Phase 1 of transitioning from the hosted service to the decentralized network. +لم تبدأ أي شبكة حتى الآن المرحلة الأولى في الانتقال من الخدمة المستضافة إلى الشبكة اللامركزية. -As networks enter Phase 1, please note that developers can still use the rate limited Developer Preview URL in the Subgraph Studio to develop and test their subgraphs (up to 1,000 free queries) without acquiring GRT or interacting with protocol economics. +عندما تبدأ الشبكات في المرحلة الأولى ، يرجى ملاحظة أنه لا يزال بإمكان المطورين استخدام Developer Preview URL المحدودة في Subgraph Studio لتطوير واختبار الرسوم البيانية الفرعية الخاصة بهم (تصل لـ 1000 استعلام مجاني) بدون الحصول على GRT أو التفاعل مع اقتصاد البروتوكول. 
-#### Phase 2 (The Sunbeam): Disable subgraph upgrades +#### Phase 2 (The Sunbeam): Disable subgraph updates -In this phase, upgrades to subgraphs must be made through Subgraph Studio and subsequently published to the decentralized network. Hosted service subgraphs for networks in this phase will still exist and will be queryable, but upgrades to subgraphs must be made on The Graph's decentralized network. +In this phase, updates to subgraphs must be made through Subgraph Studio and subsequently published to the decentralized network. Hosted service subgraphs for networks in this phase will still exist and will be queryable, but updates to subgraphs must be made on The Graph's decentralized network. -There are no exact timelines for when any network will move to this phase, as the process is driven by exit criteria surrounding core functionality, not dates. +لا يوجد جدول زمني محدد لانتقال أي شبكة إلى هذه المرحلة ، حيث أن هذه العملية تستند على معايير متعلقة بالوظائف الأساسية ، وليس التواريخ. -#### Phase 3 (The Sunrise): Disable querying subgraphs +#### المرحلة 3 (The Sunrise): تعطيل استعلام الرسوم البيانية الفرعية -At this phase, subgraphs on the hosted service for networks supported by The Graph Network will no longer process queries. The only way to query blockchain data for subgraphs on networks in this phase will be through the decentralized network. Test queries will still be available in [Subgraph Studio](https://thegraph.com/studio/) via the Development Query URL. +في هذه المرحلة ، فإن الرسوم البيانية الفرعية على الخدمة المستضافة للشبكات التي تدعمها شبكة The Graph لن تقوم بمعالجة الاستعلامات. الطريقة الوحيدة للاستعلام عن بيانات blockchain للرسم البياني الفرعي في هذه المرحلة ستكون من خلال الشبكة اللامركزية. ستظل استعلامات الاختبار متاحة في[Subgraph Studio](https://thegraph.com/studio/) عبر Development Query URL. -Networks will not move to Phase 3 until successfully moving to Phase 2 and giving developers ample time to migrate to the decentralized network. 
+لن تنتقل الشبكات إلى المرحلة 3 حتى تنتقل بنجاح إلى المرحلة 2 ومنح المطورين متسعًا من الوقت للانتقال إلى الشبكة اللامركزية. ![subgraph chart](/img/subgraph-chart.png) > Note: This diagram reflects the per-network sunsetting process. Hosted service sunsetting times will vary and will not sunset all at once. -### What happens to test networks like Goerli, Mumbai, etc? +### ماذا يحدث لشبكات الاختبار مثل Goerli و Mumbai وغيرها؟ -All networks and test networks are eligible for a free Deployment Query URL in the [Subgraph Studio](https://thegraph.com/studio/). This URL is rate limited and intended for test and development traffic. Production traffic will require a subgraph published to The Graph Network in order to have production grade redundancy and stability. +جميع الشبكات وشبكات الاختبار مؤهلة للحصول على عنوان URL مجاني لاستعلام النشر (Deployment Query URL) في [Subgraph Studio](https://thegraph.com/studio/).. عنوان URL هذا محدود ومخصص للاختبار والتطوير. ستتطلب حركة الإنتاج رسمًا بيانيًا فرعيًا منشورًا على شبكة The Graph من أجل الحصول على تكرار واستقرار عاليين في الإنتاج. ![Rate limit](/img/rate-limit.png) -### Does The Graph Network have the same functionalities as the hosted service? +### هل تمتلك شبكة Graph نفس وظائف الخدمة المستضافة؟ -Indexers on The Graph Network run the most recent network-approved [release of Graph Node](https://github.com/graphprotocol/graph-node/releases), and can support any subgraph features supported in that release. +يقوم المفهرسون على شبكة The Graph بتشغيل أحدث [ إصدار من Graph Node ](https://github.com/graphprotocol/graph-node/releases) معتمد من الشبكة ، ويمكنهم دعم أي ميزات للرسم الفرعي والمدعومة في ذلك الإصدار. -Sometimes unreleased features which are still under development might be available first on the Developer Preview URL, which runs the latest main commit of [Graph Node](https://github.com/graphprotocol/graph-node). 
These features will then become available on the network with the next Graph Node release. +في بعض الأحيان ، قد تكون الميزات التي لم يتم طرحها والتي لا تزال قيد التطوير متاحة أولاً على Developer Preview URL ، والذي يقوم بتشغيل [Graph Node](https://github.com/graphprotocol/graph-node)الرئيسي الأخير. بعد ذلك ستصبح هذه الميزات متاحة على الشبكة مع إصدار Graph Node التالي. -Certain subgraph features are not eligible for indexing rewards, if they are not deterministic or verifiable on the network. Specific examples are fetching files from IPFS, and indexing networks not yet supported on The Graph Network. +بعض ميزات الرسم البياني الفرعي إذا لم تكن قابلة للتحقق منها على الشبكة فستكون غير مؤهلة للحصول على مكافآت الفهرسة. ومن الأمثلة المحددة على ذلك جلب الملفات من IPFS وفهرسة الشبكات التي لم يتم دعمها بعد على شبكة The Graph. -Subgraphs with these features can be published to the network, but they may not be picked up by Indexers. However, subgraphs with sufficient signal may still attract Indexers interested in collecting query fees, which any subgraph is eligible for. +يمكن نشر الرسوم البيانية الفرعية على الشبكة التي تحتوي على هذه الميزات ، ولكن قد لا يتم اختيارها من قبل المفهرسين. ومع ذلك ، قد تستمر الرسوم البيانية الفرعية ذات الإشارات الكافية في جذب المفهرسين المهتمين بتحصيل رسوم الاستعلام ، والتي يحق لأي رسم بياني فرعي أن يكون مؤهلا لها. -### How much does The Graph Network cost in comparison to running my own infrastructure? +### ما هي تكلفة شبكة The Graph مقارنة بتشغيل البنية التحتية الخاصة بي؟ -The Graph's decentralized network is 60-90% less expensive than running dedicated infrastructure, as shown in [these case studies](https://thegraph.com/docs/en/network/benefits/#low-volume-user-less-than-30-000-queries-per-month). +The Graph's decentralized network is 60-90% less expensive than running dedicated infrastructure, as shown in [these case studies](https://thegraph.com/docs/en/network/benefits/#low-volume-user-less-than-30000-queries-per-month). 
-### Is there anything I should do with my hosted service subgraph after I migrate to the network? +### هل هناك أي شيء يجب أن أفعله بالرسم البياني الفرعي للخدمة المستضافة بعد انتقالي إلى الشبكة؟ -Hiding your hosted service subgraph is strongly recommended to avoid confusion. [This video](https://www.loom.com/share/7cffd2a7845e4fbd8c51f45c516cb7f9) walks through the process. +يوصى بشدة بإخفاء الرسم البياني الفرعي للخدمة المستضافة لتجنب الالتباس. يقدم هذا [الفيديو ](https://www.loom.com/share/7cffd2a7845e4fbd8c51f45c516cb7f9)شرحا للعملية. -### When will the decentralized network support my preferred network? +### متى ستدعم الشبكة اللامركزية شبكتي المفضلة؟ -There is no set timeline per network, they will be dictated by Indexer readiness via the [MIPs program](https://thegraph.com/migration-incentive-program/) where new networks are tested by Indexers. As new networks are supported on the network, users will receive ample notification to prepare for migration. Core devs and contributors to The Graph ecosystem are working to implement support for more networks as soon as possible. +لا يوجد مخطط زمني محدد لكل شبكة ، سيتم تحديدها من خلال استعداد المفهرس عبر [ برنامج MIPs ](https://thegraph.com/migration-incentive-program/) حيث يتم اختبار الشبكات الجديدة بواسطة المفهرسين. نظرًا لأن الشبكات الجديدة مدعومة على الشبكة ، سيتلقى المستخدمون إشعارا للتحضير لعملية الترحيل. يعمل المطورون الأساسيون والمساهمون في النظام البيئي لـ The Graph على تنفيذ الدعم لمزيد من الشبكات في أقرب وقت ممكن. -### Is Ethereum mainnet entering Phase 1 of the network transition process? +### هل تدخل شبكة Ethereum mainnet المرحلة الأولى من عملية انتقال الشبكة؟ -While Ethereum was initially anticipated to begin transition off of the hosted service by the end of Q3 2022, this has been [postponed](https://thegraph.com/blog/transitioning-to-decentralized-graph-network) to address user feedback. 
Additional improvements to user experience, billing, and other fulfillments of user requests will drive Ethereum's hosted service transition timeline. Stay up to date on when Ethereum will enter The Sunray phase via the integration status tracker below and via [The Graph Twitter.](http://www.twitter.com/graphprotocol) +بينما كان من المتوقع في البداية أن تبدأ Ethereum الانتقال من الخدمة المستضافة بحلول نهاية الربع الثالث من عام 2022 ، فقد [تم تأجيل ذلك](https://thegraph.com/blog/transitioning-to-decentralized-graph-network) للتعامل مع ملاحظات المستخدمين. ستتطلب التحسينات الإضافية في تجربة المستخدم، والفوترة، وتلبية طلبات المستخدمين الأخرى إلى تعديل الجدول زمني لعملية انتقال الخدمة المستضافة على Ethereum. ابق على اطلاع دائم بموعد دخول Ethereum إلى مرحلة Sunray عبر متتبع حالة التكامل أدناه وعبر [ The Graph Twitter. ](http://www.twitter.com/graphprotocol) -### The Graph Network integration status tracker +### متتبع حالة تكامل شبكة Graph -The table below illustrates where each network is in the network integration process. If your preferred network is not yet listed, integration has not yet begun, and that network is still fully supported by The Graph's hosted service. +يوضح الجدول أدناه مكان وجود كل شبكة في عملية التكامل. إذا لم تكن شبكتك المفضلة مدرجة بعد ، فهذا يعني أن التكامل لم يبدأ بعد ولا تزال هذه الشبكة مدعومة بالكامل بواسطة الخدمة المستضافة لـ The Graph. > This table will not include test networks, which remain free in [Subgraph Studio](https://thegraph.com/studio/). 
-| Network | Announcing integration on The Graph Network | Network Integration complete | Phase 1: disable new subgraphs on hosted service | Phase 2: disable subgraph upgrades on hosted service | Phase 3: disable subgraphs on hosted service | +| Network | Announcing integration on The Graph Network | اكتمل تكامل الشبكة | المرحلة 1: تعطيل الرسوم البيانية الفرعية الجديدة على الخدمة المستضافة | Phase 2: disable subgraph updates on hosted service | المرحلة 3: تعطيل الرسوم البيانية الفرعية على الخدمة المستضافة | | --- | :-: | :-: | :-: | :-: | :-: | | Ethereum | ✓ | ✓ | | | | | Gnosis (formerly xDAI) | ✓ | ✓\* | | | | @@ -95,79 +95,79 @@ The table below illustrates where each network is in the network integration pro | Optimism | ✓ | | | | | | Fantom | ✓ | | | | | -\* The network is currently in beta on The Graph's decentralized network. +\* الشبكة حاليًا في مرحلة بيتا على الشبكة اللامركزية لـ The Graph. -## Query Fees, API Keys, and Billing FAQs +## الأسئلة الشائعة حول رسوم الاستعلام ومفاتيح API والفوترة -### How are query fees priced? +### كيف يتم تسعير رسوم الاستعلام؟ -Query fee prices are impacted by query demand on the decentralized network. Core developers created a query pricing cost model language called [Agora](https://github.com/graphprotocol/agora). It enables Indexers to price queries efficiently. Learn more in the [Agora documentation](https://github.com/graphprotocol/agora/blob/master/docs/README.md). +تتأثر أسعار رسوم الاستعلام بكمية طلب الاستعلام على الشبكة اللامركزية. أنشأ المطورون الأساسيون لغة لنموذج تكلفة تسعير الاستعلام تسمى [ Agora ](https://github.com/graphprotocol/agora). حيث أنها تمكن المفهرسين من تسعير الاستعلامات بكفاءة. تعرف على المزيد في [ وثائق Agora ](https://github.com/graphprotocol/agora/blob/master/docs/README.md). -### How can I set a maximum query budget? 
+### كيف يمكنني تعيين حد أقصى لميزانية الاستعلام؟ -Users can set a max query budget in the Subgraph Studio [API Key](https://thegraph.com/studio/apikeys/) section, under the Budget tab. [Watch this video](https://www.loom.com/share/b5fc533e48584cb694017392c80c75e0) for an overview of that process, as well as adjusting other parts of your API Key. +يمكن للمستخدمين تعيين الحد الأقصى لميزانية الاستعلام في قسم [ API Key ](https://thegraph.com/studio/apikeys/) في Subgraph Studio ، ضمن علامة التبويب Budget. [ شاهد هذا الفيديو ](https://www.loom.com/share/b5fc533e48584cb694017392c80c75e0) للحصول على نظرة عامة حول هذه العملية ، بالإضافة إلى ضبط الأجزاء الأخرى لـ API Key. -Please note that setting your max query budget too low will exclude Indexers, potentially leading to poor quality service in the form of failed queries, slow queries, etc. +يرجى ملاحظة أن ضبط حدود الميزانية القصوى للاستعلام الخاص بك بشكل منخفض جدًا سيؤدي إلى استبعاد المُفهرسين، مما قد يؤدي في النهاية إلى تقديم خدمة ذات جودة سيئة على شكل استعلامات فاشلة أو بطيئة. -As of the end of September 2022, it's best practice to stay within the $0.00035-$0.0004 range as the lowest max query budget. +اعتبارًا من نهاية سبتمبر 2022 ، فإنه من الأفضل أن يكون أدنى حد أقصى لميزانية الاستعلام في نطاق من 0.00035 إلى 0.0004 دولار. -### How can I protect my API Key? +### كيف يمكنني حماية API Key الخاص بي؟ Users are encouraged to restrict the API key by both subgraph and domain in the [Subgraph Studio](https://thegraph.com/studio/): -![Restrict domain](/img/restrictdomain.png) +![تقييد الدومين](/img/restrictdomain.png) -### How do I fill up my API key to pay for query fees? +### كيف أقوم بملء مفتاح API الخاص بي لدفع رسوم الاستعلام؟ -You can fill up your billing balance in the Subgraph Studio [Billing Dashboard](https://thegraph.com/studio/billing/) by pressing the "Add GRT" button. There is ongoing work to improve this experience to add more seamless and recurring payments. 
+يمكنك ملء رصيد الفوترة الخاص بك في [ لوحة معلومات الفواتير ](https://thegraph.com/studio/billing/) في Subgraph Studio عن طريق الضغط على الزر "Add GRT". هناك عمل مستمر للتحسين لإضافة مدفوعات أكثر سلاسة. -[This video](https://www.loom.com/share/a81de6ef11d64c62872ea210c58c6af5) has an overview of that process. +يحتوي [ هذا الفيديو ](https://www.loom.com/share/a81de6ef11d64c62872ea210c58c6af5) على نظرة عامة حول هذه العملية. -### How do I set alerts for low billing balances in my API key? +### كيف يمكنني ضبط تنبيهات لأرصدة الفوترة المنخفضة في مفتاح API الخاص بي؟ -Users should set a billing alert to their email address [here](https://thegraph.com/studio/settings/). +يجب على المستخدمين ضبط تنبيه فوترة لعنوان بريدهم الإلكتروني [ هنا ](https://thegraph.com/studio/settings/). -Also, a banner will flash within a user's UI to warn when a billing balance is getting low. +أيضًا ، ستومض لافتة داخل واجهة المستخدم للتحذير عند انخفاض رصيد الفوترة. -What are the best practices for managing my API key settings? +كيف أدير إعدادات مفتاح API الخاصة بي بأفضل طريقة؟ -A max query budget of $0.0004 is recommended to maintain low average query prices while maintaining high quality of service. This can be done in the budget billing tab of the [API Key section](https://thegraph.com/studio/apikeys/). +يوصى بميزانية استعلام قدرها 0.0004 دولارا كحد أقصى وذلك للحفاظ على متوسط أسعار استعلام منخفضة مع الحفاظ على خدمة ذات جودة عالية. يمكنك القيام بذلك في علامة التبويب "budget billing" [ في قسم API Key ](https://thegraph.com/studio/apikeys/). -## Migration FAQs +## الأسئلة الشائعة حول الترحيل -### How can I migrate my subgraph to The Graph's decentralized network? +### كيف يمكنني ترحيل الرسم البياني الفرعي الخاص بي إلى الشبكة اللامركزية لـ The Graph؟ -Learn how to migrate your subgraph to The Graph Network with this simple [step-by-step guide](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph) or [this video](https://www.youtube.com/watch?v=syXwYEk-VnU&t=1s). 
+تعرف على كيفية ترحيل الغراف الفرعي إلى شبكة الغراف باستخدام هذا [ الدليل البسيط خطوة بخطوة ](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph) أو [ عن طريق هذا الفيديو ](https://www.youtube.com/watch?v=syXwYEk-VnU&t=1s). -### Are there migration grants for subgraphs that migrate early to The Graph Network? +### Are there Network Grants for subgraphs that migrate early to The Graph Network? -Yes. To apply for a grant, reach out [here](mailto:migration@thegraph.foundation). +Yes. To apply for a Network Grant, reach out [here](mailto:migration@thegraph.foundation). -### Is there any financial/technical/marketing support through the migration process from The Graph ecosystem? +### هل يوجد أي دعم مالي / فني / تسويقي عبر عملية الترحيل من النظام البيئي للغراف؟ -There are migration grants for projects to use to curate subgraphs (to attract Indexers) and pay for initial query fees (apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com)), a [direct channel](http://thegraph.com/discord) to engineers to help every step of the way, and prioritized marketing campaigns to showcase your project after migration, exampled in these Twitter threads: [1](https://twitter.com/graphprotocol/status/1496891582401814537), [2](https://twitter.com/graphprotocol/status/1491926128302379008), & [3](https://twitter.com/graphprotocol/status/1491126245396201473). 
+There are Network Grants for projects to use to curate subgraphs (to attract Indexers) and pay for initial query fees (apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com)), a [direct channel](http://thegraph.com/discord) to engineers to help every step of the way, and prioritized marketing campaigns to showcase your project after migration, exampled in these Twitter threads: [1](https://twitter.com/graphprotocol/status/1496891582401814537), [2](https://twitter.com/graphprotocol/status/1491926128302379008), & [3](https://twitter.com/graphprotocol/status/1491126245396201473). -### How long do queries take? +### ما هي المدة التي تستغرقها الاستعلامات؟ -Queries take an average of 150-300 milliseconds on the decentralized network. +تستغرق الاستعلامات ما متوسطه 150-300 مللي ثانية على الشبكة اللامركزية. -### Is the billing process on The Graph Network more complex than on the hosted service? +### هل عملية الفوترة على شبكة الغراف أكثر تعقيدًا من الخدمة المستضافة؟ Yes, the UX for the network is not yet at quality parity with the hosted service. The billing UX, in particular, is still in very early stages and there are many moving parts that the core dev teams are working to abstract away from the process. Much of these improvements will be made public in the near future. -### Can I pay for The Graph Network queries in fiat, credit card, or stablecoins? +### هل يمكنني الدفع مقابل استعلامات شبكة Graph بالأوراق النقدية وبطاقات الإئتمان والعملات المستقرة؟ -In the coming months, the number of steps that users need to take to pay for their subgraphs will be vastly reduced. While payments will still be made in GRT, efforts to implement a fiat on-ramp and automated payment systems to convert fiat and crypto into GRT to make recurring payments are already underway. +في الأشهر القادمة، سيتم التقليل وبشكل كبير عدد الخطوات التي يحتاجها المستخدمون لمدفوعات الغراف الفرعي. 
وعلى الرغم من أن المدفوعات ستستمر في القيام بذلك باستخدام GRT، إلا أن هناك جهود جارية لتنفيذ نظام يسمح بتحويل العملات الورقية والعملات المشفرة إلى GRT تلقائيًا لتسهيل المدفوعات المتكررة. ### Will the network ever have the same UX as the hosted service? -While there is still work to do, the aim is to offer comparable if not better quality UX on The Graph Network than currently exists on the hosted service. Short term, the aim is to offer a more streamlined and predictable billing experience that helps users focus more time building high-quality dapps. +بينما لا يزال هناك عمل يجب القيام به، إلا أن الهدف هو تقديم تجربة مستخدم مماثلة، إن لم تكن أفضل من تلك المتاحة حاليًا في الخدمة المستضافة. على المدى القصير، يهدف الجهود إلى توفير فوترة أكثر تيسيرًا وتنبؤًا تساعد المستخدمين على التركيز على بناء تطبيقات عالية الجودة. ### How can I ensure that my subgraph will be picked up by Indexer on The Graph Network? It is recommended to curate with at least 10,000 GRT, which users can do in the same transaction as when they publish. Users can also ask the curation community to curate their subgraph [here](https://t.me/CurationStation). -There are migration grants for the early migrants to cover these initial costs. Feel free to apply [here](mailto:migration@thegraph.foundation). +There are Network Grants for the early migrants to cover these initial costs. Feel free to apply [here](mailto:migration@thegraph.foundation). ### Why does a subgraph need curation signal? What if there isn't enough signal on my subgraph from curators? @@ -181,63 +181,65 @@ If you are the first to signal a subgraph, your GRT signaled amount will not go Short term, the initial curation model on Arbitrum will provide principle-protection to curation signal. Longer term, the core devs will prioritize offering developers the capacity to rent curation signal, opening up a more predictable pricing experience while still ensuring subgraphs are sufficiently indexed. 
-### How do I switch the subgraph API in the front-end? +### كيف أقوم بتبديل subgraph API في الواجهة الأمامية؟ -After at least one Indexer has fully indexed a subgraph, a user can query the decentralized network. +بعد قيام مفهرس واحد على الأقل بفهرسة الرسم البياني الفرعي بالكامل ، يمكن للمستخدم الاستعلام عن الشبكة اللامركزية. -In order to retrieve the query URL for your subgraph, you can copy/paste it by clicking on the symbol next to the query URL. You will see something like this: +لاسترداد عنوان الاستعلام للرسم الفرعي الخاص بك ، يمكنك نسخه / لصقه بالنقر فوق الرمز الموجود بجوار عنوان الاستعلام. سترى شيئًا مثل هذا: `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -Simply replace [api-key] with an API key generated in the Subgraph Studio [API Key section](https://thegraph.com/studio/apikeys/). +ما عليك سوى استبدال [api-key] بمفتاح API الذي تم إنشاؤه في [ قسم API Key ](https://thegraph.com/studio/apikeys/) في Subgraph Studio. -### How much do queries cost? +### كم تكلفة الاستعلامات؟ -The average query cost within the network varies. For the month of September 2022, the average price per query fee cost ranged from $0.00012 - $0.00020. +يختلف متوسط تكلفة الاستعلام داخل الشبكة. لشهر سبتمبر 2022 ، فقد تراوح متوسط سعر تكلفة رسوم الاستعلام من 0.00012 إلى 0.00020 دولار. -### How can I find out how much volume my subgraph has and how much it will cost? +### كيف يمكنني معرفة حجم الرسم البياني الفرعي الخاص بي وكم سيكلف؟ -Hosted service volume data is not public. Please reach out to get volume and cost estimates [here](mailto:migration@thegraph.foundation). +بيانات حجم الخدمة المستضافة ليست عامة. يرجى التواصل للحصول على تقديرات الحجم والتكلفة [ هنا ](mailto:migration@thegraph.foundation). ### How does the gateway work? Is it fully decentralized? The gateway process queries so Indexers can serve dapps. The gateways are in an intermediate phase that is being progressively decentralized. More on this soon. 
-## Using The Network FAQs +## استخدام الأسئلة الشائعة حول الشبكة -### Is there a cost to upgrade my subgraph? +### Is there a cost to update my subgraph? -Yes, it is 1% of curation signaled. The 1% is split evenly between Curators (0.5%) and subgraph developers (0.5%). So, for every 10K GRT signaled, it costs subgraph developers 50 GRT to upgrade. +Yes, it is 1% of curation signaled. The 1% is split evenly between Curators (0.5%) and subgraph developers (0.5%). So, for every 10K GRT signaled, it costs subgraph developers 50 GRT to update. -### How do I speed up sync time? +### كيف أقوم بإسراع وقت المزامنة؟ -Minimize the use of smart contract calls within the subgraph. Accessing a smart contract state requires an eth_call to the RPC, which slows down sync times. +قلل من استخدام استدعاءات العقود الذكية داخل الغراف الفرعي. الوصول إلى حالة العقد الذكي يتطلب وجود eth_call إلى RPC، وهذا يبطئ وقت المزامنة. -### Is there multisig support in Subgraph Studio as I migrate? +### هل هناك دعم لـ multisig في Subgraph Studio أثناء الترحيل؟ -Yes, multisig support has recently been added. You can find more information [here](https://thegraph.com/docs/studio/multisig). +نعم ، تمت إضافة دعم لـ multisig مؤخرًا. يمكنك العثور على مزيد من المعلومات [ هنا ](https://thegraph.com/docs/studio/multisig). ### ما هي عناوين عقود عملة القراف على إيثيريوم و أربترم؟ - Ethereum: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` +- Ethereum Goerli: `0x5c946740441C12510a167B447B7dE565C20b9E3C` - أربترم: `0x9623063377AD1B27544C965cCd7342f7EA7e88C7` +- Arbitrum Goerli: `0x18c924bd5e8b83b47efadd632b7178e2fd36073d` -### How much GRT do projects usually keep in their API Key? +### ما مقدار GRT التي تحتفظ بها المشاريع عادةً في مفتاح API الخاص بهم؟ Many projects keep 30-60 days worth of GRT in their API key, so they don't need to refill often. To understand what your 30-60 day GRT fees would be, please reach out [here](mailto:migration@thegraph.foundation). 
-### How are query payments made on the decentralized network?  +### كيف تتم مدفوعات الاستعلام على الشبكة اللامركزية؟  الرسوم يتم فوترتها أسبوعياً وتسحب من مفتاح واجهة تطبيق البرمجة الخاص بالمستخدم, بعملة القراف المجسرة إلى أربترم وتستقر هناك. -### How are API keys used for subgraphs on the decentralized network? +### كيف يتم استخدام مفاتيح API للرسم البياني الفرعي على الشبكة اللامركزية؟ -API Keys empower users to have a say in both the max query prices they pay and to prioritize factors like price, economic freshness, and query speed. +تمنح الـ API المستخدمين القدرة على التحكم في الحد الأقصى لأسعار الاستعلامات التي يدفعونها وتحديد الأولويات مثل السعر والتحديث الاقتصادي وسرعة الاستعلام. -### How does quality of service currently compare between the hosted service and the decentralized network? +### كيف تقارن جودة الخدمة حاليًا بين الخدمة المستضافة والشبكة اللامركزية؟ The hosted service and decentralized network have about the same median latency, but the decentralized network tends to have higher latency at higher percentiles. 200 rates for queries are generally similar, with both > 99.9%. As a result of its decentralization, the network has not had a broad outage across subgraphs, whereas the hosted service does on rare occasions have temporary outages as a result of its centralized nature. -### What if my question isn't answered by these FAQs? +### ماذا لو لم يتم الرد على سؤالي من خلال هذه الأسئلة الشائعة؟ -Please reach out to [migration@thegraph.foundation](mailto:migration@thegraph.foundation) for any additional assistance. +يرجى التواصل مع [migration@thegraph.foundation](mailto:migration@thegraph.foundation) للحصول على أي مساعدة إضافية. 
diff --git a/website/pages/ar/network/benefits.mdx b/website/pages/ar/network/benefits.mdx index 3be10edc4e03..13a4da8d42e6 100644 --- a/website/pages/ar/network/benefits.mdx +++ b/website/pages/ar/network/benefits.mdx @@ -79,7 +79,9 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). -Some users may need to upgrade subgraphs. Due to Ethereum gas fees, upgrades cost ~$50 per upgrade at time of this writing. +Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. + +Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. ## No Setup Costs & Greater Operational Efficiency @@ -91,4 +93,4 @@ The Graph’s decentralized network gives users access to geographic redundancy Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. -Start using The Graph Network today, and learn how to [migrate or deploy your subgraph](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph). +Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). diff --git a/website/pages/ar/network/curating.mdx b/website/pages/ar/network/curating.mdx index f8467d0c5732..4793be612934 100644 --- a/website/pages/ar/network/curating.mdx +++ b/website/pages/ar/network/curating.mdx @@ -4,7 +4,7 @@ title: Curating Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. 
The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs. -يمكن للمنسقين اتخاذ القرار إما بالإشارة إلى إصدار معين من Subgraphs أو الإشارة باستخدام الترحيل التلقائي auto-migrate. عند الإشارة باستخدام الترحيل التلقائي ، ستتم دائما ترقية حصص المنسق إلى أحدث إصدار ينشره المطور. وإذا قررت الإشارة إلى إصدار معين، فستظل الحصص دائما في هذا الإصدار المحدد. +When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. تذكر أن عملية التنسيق محفوفة بالمخاطر. نتمنى أن تبذل قصارى جهدك وذلك لتنسق ال Subgraphs الموثوقة. إنشاء ال subgraphs لا يحتاج إلى ترخيص، لذلك يمكن للأشخاص إنشاء subgraphs وتسميتها بأي اسم يرغبون فيه. لمزيد من الإرشادات حول مخاطر التنسيق ، تحقق من[The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) @@ -60,7 +60,7 @@ Curators make The Graph network efficient and signaling is the process that cura ## المخاطر 1. سوق الاستعلام يعتبر حديثا في The Graph وهناك خطر من أن يكون٪ APY الخاص بك أقل مما تتوقع بسبب ديناميكيات السوق الناشئة. -2. رسوم التنسيق - عندما يشير المنسق إلى GRT على subgraph ، فإنه يتحمل ضريبة تنسيق بنسبة 1٪. يتم حرق هذه الرسوم ويودع الباقي في العرض الاحتياطي لمنحنى الترابط. +2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. 3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). 4. يمكن أن يفشل ال subgraph بسبب خطأ. ال subgraph الفاشل لا يمكنه إنشاء رسوم استعلام. نتيجة لذلك ، سيتعين عليك الانتظار حتى يصلح المطور الخطأ وينشر إصدارا جديدا. - إذا كنت مشتركا في أحدث إصدار من subgraph ، فسيتم ترحيل migrate أسهمك تلقائيا إلى هذا الإصدار الجديد. هذا سيتحمل ضريبة تنسيق بنسبة 0.5٪. @@ -79,13 +79,13 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma - يمكن للمنسقين استخدام فهمهم للشبكة لمحاولة التنبؤ كيف لل subgraph أن يولد حجم استعلام أعلى أو أقل في المستقبل - Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. -### 3. ما هي تكلفة ترقية ال subgraph؟ +### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. -### 4. كم مرة يمكنني ترقية ال subgraph الخاص بي؟ +### 4. How often can I update my subgraph? 
-يفضل عدم ترقية ال subgraphs بشكل متكرر. ارجع للسؤال أعلاه لمزيد من التفاصيل. +It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. ### 5. هل يمكنني بيع أسهم التنسيق الخاصة بي؟ diff --git a/website/pages/ar/network/developing.mdx b/website/pages/ar/network/developing.mdx index 1fc2f908c238..3ee32c2df2e6 100644 --- a/website/pages/ar/network/developing.mdx +++ b/website/pages/ar/network/developing.mdx @@ -34,11 +34,11 @@ In order to make queries, developers must generate an API key, which can be done Developers are also able to express an Indexer preference to the gateway, for example preferring Indexers whose query response is faster, or whose data is most up to date. These controls are set in the Subgraph Studio. -### Upgrading Subgraphs +### Updating Subgraphs After a time a subgraph developer may want to update their subgraph, perhaps fixing a bug or adding new functionality. The subgraph developer may deploy new version(s) of their subgraph to the Subgraph Studio for rate-limited development and testing. -Once the Subgraph Developer is ready to upgrade, they can initiate a transaction to point their subgraph at the new version. Upgrading the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. +Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. 
### Deprecating Subgraphs @@ -50,4 +50,4 @@ Some developers will engage with the full subgraph lifecycle on the network, pub ### Developers and Network Economics -Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is upgraded. +Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. diff --git a/website/pages/ar/network/explorer.mdx b/website/pages/ar/network/explorer.mdx index 6fadebb1d922..ecd2f2798a6a 100644 --- a/website/pages/ar/network/explorer.mdx +++ b/website/pages/ar/network/explorer.mdx @@ -87,7 +87,7 @@ If you want to learn more about the Curator role, you can do so by visiting the - كمية ال GRT التي يمتلكونها حاليا في البروتوكول - تاريخ آخر تفويض لهم -If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/network/delegators). +If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). ## Network diff --git a/website/pages/ar/network/indexing.mdx b/website/pages/ar/network/indexing.mdx index 81f599b9f304..e2b8314c6414 100644 --- a/website/pages/ar/network/indexing.mdx +++ b/website/pages/ar/network/indexing.mdx @@ -38,9 +38,9 @@ Allocations are continuously accruing rewards while they're active and allocated ### Can pending indexing rewards be monitored? 
-The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. +يحتوي عقد RewardsManager على وظيفة [ الحصول على المكافآت ](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) للقراءة فقط يمكن استخدامها للتحقق من المكافآت المعلقة لتخصيص معين. -Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: +تشتمل العديد من لوحات المعلومات التي أنشأها المجتمع على قيم المكافآت المعلقة ويمكن التحقق منها بسهولة يدويًا باتباع الخطوات التالية: 1. استعلم عن [mainnet الفرعيةرسم بياني ](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) للحصول على IDs لجميع المخصصات النشطة: @@ -62,10 +62,10 @@ Many of the community-made dashboards include pending rewards values and they ca - انتقل إلى [ واجهة Etherscan لعقد المكافآت Rewards contract ](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) -* To call `getRewards()`: - - Expand the **10. getRewards** dropdown. +* لاستدعاء `getRewards()`: + - قم بتوسيع ** 10 .الحصول على المكافآت ** القائمة المنسدلة. - أدخل ** معرّف التخصيص ** في الإدخال. - - Click the **Query** button. + - انقر فوق الزر ** الاستعلام **. ### ما هي الاعتراضات disputes وأين يمكنني عرضها؟ @@ -108,7 +108,7 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that ### ما هي المتطلبات للهاردوير؟ - **صغيرة**ـ يكفي لبدء فهرسة العديد من ال subgraphs، من المحتمل أن تحتاج إلى توسيع. -- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- ** قياسية ** - هو الإعداد الافتراضي ، ويتم استخدامه في مثال بيانات نشر k8s / terraform. - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
- **كبيرة** - مُعدة لفهرسة جميع ال subgraphs المستخدمة حاليا وأيضا لخدمة طلبات حركة مرور البيانات ذات الصلة. @@ -309,7 +309,7 @@ kubectl config use-context $(kubectl config get-contexts --output='name' - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **متطلبات إضافية لمستخدمي Ubuntu **- لتشغيل عقدة الرسم البياني على Ubuntu ، قد تكون هناك حاجة إلى بعض الحزم الإضافية. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -565,7 +565,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +على سبيل المثال ، إذا كانت القاعدة العامة لديها`minStake` من ** 5 ** (GRT) ، فأي نشر subgraph به أكثر من 5 (GRT) من الحصة المخصصة ستتم فهرستها. قواعد العتبة تتضمن `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, `minAverageQueryFees`. نموذج البيانات Data model: @@ -758,7 +758,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/networ 1. افتح [ تطبيق Remix ](https://remix.ethereum.org/) على المتصفح -2. 
In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. في `File Explorer` أنشئ ملفا باسم ** GraphToken.abi ** باستخدام [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. مع تحديد `GraphToken.abi` وفتحه في المحرر ، قم بالتبديل إلى Deploy و `Run Transactions` في واجهة Remix. @@ -800,6 +800,6 @@ After being created by an Indexer a healthy allocation goes through four states. - **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. +- ** مُطالب به ** - هي الحالة النهائية للتخصيص ؛ وهي التي سلكت مجراها كمخصصة نشطة ، وتم توزيع جميع المكافآت المؤهلة وتمت المطالبة بخصومات رسوم الاستعلام. Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. 
diff --git a/website/pages/ar/new-chain-integration.mdx b/website/pages/ar/new-chain-integration.mdx new file mode 100644 index 000000000000..94ee68502336 --- /dev/null +++ b/website/pages/ar/new-chain-integration.mdx @@ -0,0 +1,75 @@ +--- +title: Integrating New Networks +--- + +Graph Node can currently index data from the following chain types: + +- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) +- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) +- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) +- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) + +If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. + +If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. + +**1. EVM JSON-RPC** + +If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). + +**2. Firehose** + +For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/README/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. 
+ +## Difference between EVM JSON-RPC & Firehose + +While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. + +New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. + +> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) + +--- + +## Testing an EVM JSON-RPC + +For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: + +- `eth_getLogs` +- `eth_call` _(for historical blocks, with EIP-1898 - requires archive node)_ +- `eth_getBlockByNumber` +- `eth_getBlockByHash` +- `net_version` +- `eth_getTransactionReceipt`, in a JSON-RPC batch request +- _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ + +### Graph Node Configuration + +**Start by preparing your local environment** + +1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL + > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. +3. 
Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ + +**Test the integration by locally deploying a subgraph** + +1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) +2. Create a simple example subgraph. Some options are below: + 1. The pre-packed [Gravatar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point + 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) +3. Adapt the resulting `subgraph.yaml` by changing [`dataSources.network`](http://dataSources.network) to the same name previously passed on to Graph Node. +4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` + +Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. + +--- + +## Integrating a new Firehose-enabled chain + +Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: + +1. [General docs on Firehose](firehose/) +2. [Adding Firehose support for a new chain](firehose/integrate-new-chains/new-blockchains/) +3. 
[Integrating Graph Node with a new chain via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) diff --git a/website/pages/ar/operating-graph-node.mdx b/website/pages/ar/operating-graph-node.mdx index bf932e33ad56..ac8e5046131d 100644 --- a/website/pages/ar/operating-graph-node.mdx +++ b/website/pages/ar/operating-graph-node.mdx @@ -42,7 +42,7 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **متطلبات إضافية لمستخدمي Ubuntu **- لتشغيل عقدة الرسم البياني على Ubuntu ، قد تكون هناك حاجة إلى بعض الحزم الإضافية. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -69,33 +69,6 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -### الشروع في استخدام Docker - -#### المتطلبات الأساسية - -- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yml`. - -#### Setup - -1. انسخ Graph Node وانتقل إلى دليل Docker: - -```sh -git clone http://github.com/graphprotocol/graph-node -cd graph-node/docker -``` - -2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yml`using the included script: - -```sh -./setup.sh -``` - -3. ابدأ Graph Node محلية والتي ستتصل ب Ethereum endpoint الخاصة بك: - -```sh -docker-compose up -``` - ### Getting started with Kubernetes A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). 
diff --git a/website/pages/ar/querying/querying-best-practices.mdx b/website/pages/ar/querying/querying-best-practices.mdx index ce2b7b2114f6..24f0e802e02a 100644 --- a/website/pages/ar/querying/querying-best-practices.mdx +++ b/website/pages/ar/querying/querying-best-practices.mdx @@ -1,22 +1,22 @@ --- -title: Querying Best Practices +title: أفضل الممارسات للاستعلام --- -The Graph provides a decentralized way to query data from blockchains. +يوفر The Graph طريقة لامركزية للاستعلام عن البيانات من سلاسل الكتل. -The Graph network's data is exposed through a GraphQL API, making it easier to query data with the GraphQL language. +يتم عرض بيانات شبكة Graph من خلال GraphQL API ، مما يسهل الاستعلام عن البيانات باستخدام لغة GraphQL. -This page will guide you through the essential GraphQL language rules and GraphQL queries best practices. +ستوجهك هذه الصفحة خلال القواعد الأساسية للغة GraphQL وأفضل ممارسات استعلامات GraphQL. --- -## Querying a GraphQL API +## الاستعلام عن واجهة برمجة تطبيقات GraphQL -### The anatomy of a GraphQL query +### بنية استعلام GraphQL -Unlike REST API, a GraphQL API is built upon a Schema that defines which queries can be performed. +على عكس REST API ، فإن GraphQL API مبنية على مخطط يحدد الاستعلامات التي يمكن تنفيذها. -For example, a query to get a token using the `token` query will look as follows: +على سبيل المثال ، طلب الاستعلام للحصول على توكن باستخدام استعلام `token` سيبدو كما يلي: ```graphql query GetToken($id: ID!) { @@ -27,7 +27,7 @@ query GetToken($id: ID!) { } ``` -which will return the following predictable JSON response (_when passing the proper `$id` variable value_): +والتي ستعرض استجابة JSON التالية التي يمكن التنبؤ بها (_ عند تمرير القيمة المتغيرة `$id` المناسبة _): ```json { @@ -38,9 +38,9 @@ which will return the following predictable JSON response (_when passing the pro } ``` -GraphQL queries use the GraphQL language, which is defined upon [a specification](https://spec.graphql.org/). 
+تستخدم استعلامات GraphQL لغة GraphQL ، التي تم تحديدها في [المواصفات](https://spec.graphql.org/). -The above `GetToken` query is composed of multiple language parts (replaced below with `[...]` placeholders): +يتكون استعلام `GetToken` أعلاه من أجزاء متعددة للغة (تم استبدالها أدناه بـ placeholders `[...]`): ```graphql query [operationName]([variableName]: [variableType]) { @@ -52,22 +52,20 @@ query [operationName]([variableName]: [variableType]) { } ``` -While the list of syntactic do's and don'ts is long, here are the essential rules to keep in mind when it comes to writing GraphQL queries: +على الرغم من أن قائمة القواعد التي يجب اتباعها طويلة، إلا أن هناك قواعد أساسية يجب أخذها في الاعتبار عند كتابة استعلامات GraphQL: -- Each `queryName` must only be used once per operation. -- Each `field` must be used only once in a selection (we cannot query `id` twice under `token`) -- Some `field`s or queries (like `tokens`) return complex types that require a selection of sub-field. Not providing a selection when expected (or providing one when not expected - for example, on `id`) will raise an error. To know a field type, please refer to [The Graph Explorer](/network/explorer). -- Any variable assigned to an argument must match its type. -- In a given list of variables, each of them must be unique. -- All defined variables must be used. +- يجب استخدام كل `queryName` مرة واحدة فقط لكل عملية. +- يجب استخدام كل `field` مرة واحدة فقط في التحديد (لا يمكننا الاستعلام عن `id` مرتين ضمن `token`) +- بعض `field` أو الاستعلامات (مثل `tokens`) ترجع أنواعًا معقدة تتطلب تحديدًا للحقول الفرعية. عدم تقديم تحديد عندما يكون متوقعًا (أو تقديم تحديد عندما لا يكون متوقعًا - على سبيل المثال ، الـ `id`) سيؤدي إلى ظهور خطأ. لمعرفة نوع الحقل ، يرجى الرجوع إلى [ The Graph Explorer ](/network/explorer). +- يجب أن يكون أي متغير تم تعيينه لوسيط متطابقًا مع نوعه. +- في قائمة المتغيرات المعطاة ، يجب أن يكون كل واحد منها فريدًا. +- يجب استخدام جميع المتغيرات المحددة. 
-Failing to follow the above rules will end with an error from the Graph API. +إذا لم تتبع القواعد المذكورة أعلاه ، فستحدث خطأ من Graph API. -For a complete list of rules with code examples, please look at our GraphQL Validations guide. +للحصول على قائمة كاملة بالقواعد مع أمثلة التعليمات البرمجية ، يرجى إلقاء نظرة على دليل التحقق من GraphQL. -
- -### Sending a query to a GraphQL API +### إرسال استعلام إلى GraphQL API GraphQL is a language and set of conventions that transport over HTTP. @@ -75,12 +73,10 @@ It means that you can query a GraphQL API using standard `fetch` (natively or vi However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result - -
+- التعامل مع ال subgraph عبر السلاسل: الاستعلام من عدة subgraphs عبر استعلام واحد +- [تتبع الكتلة التلقائي](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [ترقيم الصفحات التلقائي](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- نتيجة مكتوبة بالكامل Here's how to query The Graph with `graph-client`: @@ -108,15 +104,13 @@ main() More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). -
- Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. --- -## Writing GraphQL queries +## كتابة استعلامات GraphQL -### Always write static queries +### اكتب دائمًا استعلامات ثابتة A common (bad) practice is to dynamically build query strings as follows: @@ -136,10 +130,10 @@ query GetToken { While the above snippet produces a valid GraphQL query, **it has many drawbacks**: -- it makes it **harder to understand** the query as a whole +- يجعل من الصعب ** فهم ** الإستعلام ككل - developers are **responsible for safely sanitizing the string interpolation** -- not sending the values of the variables as part of the request parameters **prevent possible caching on server-side** -- it **prevents tools from statically analyzing the query** (ex: Linter, or type generations tools) +- عدم إرسال قيم المتغيرات كجزء من معاملات الطلب ** يمنع التخزين المؤقت المحتمل على جانب الخادم ** +- ** يمنع هذا الأمر أدوات التحليل الثابت للإستعلام**(على سبيل المثال: أدوات التدقيق اللغوي أو أدوات إنشاء الأنواع Linter) For this reason, it is recommended to always write queries as static strings: @@ -165,10 +159,10 @@ const result = await execute(query, { Doing so brings **many advantages**: -- **Easy to read and maintain** queries -- The GraphQL **server handles variables sanitization** -- **Variables can be cached** at server-level -- **Queries can be statically analyzed by tools** (more on this in the following sections) +- ** إستعلامات سهلة القراءة والصيانة ** +- يقوم خادم GraphQL ** بتصحيح المتغيرات ** +- ** يمكن تخزين المتغيرات مؤقتًا ** على مستوى الخادم +- ** يمكن تحليل طلبات البحث بشكل ثابت بواسطة الأدوات ** (المزيد حول هذا الموضوع في الأقسام التالية) **Note: How to include fields conditionally in static queries** @@ -199,9 +193,7 @@ const result = await execute(query, { Note: The opposite directive is `@skip(if: ...)`. -
- -### Performance tips +### نصائح حول الأداء **"Ask for what you want"** @@ -289,9 +281,7 @@ const { result: { tokens, counters } } = execute(query) This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. -
- -### Leverage GraphQL Fragments +### الاستفادة من أجزاء GraphQL A helpful feature to write GraphQL queries is GraphQL Fragment. @@ -318,7 +308,7 @@ query { Such repeated fields (`id`, `active`, `status`) bring many issues: - harder to read for more extensive queries -- when using tools that generate TypeScript types based on queries (_more on that in the last section_), `newDelegate` and `oldDelegate` will result in two distinct inline interfaces. +- عند استخدام الأدوات التي تنشئ أنواع TypeScript بناءً على الاستعلامات (_المزيد عن ذلك في القسم الأخير_)، و `newDelate` و `oldDelegate` سينتج عنهما واجهتين مضمنتان متمايزتين. A refactored version of the query would be the following: @@ -348,9 +338,7 @@ Using GraphQL `fragment` will improve readability (especially at scale) but also When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). -
- -### GraphQL Fragment do's and don'ts +### ما يجب فعله وما لا يجب فعله في GraphQL Fragment **Fragment base must be a type** @@ -401,8 +389,8 @@ For most use-case, defining one fragment per type (in the case of repeated field Here is a rule of thumb for using Fragment: -- when fields of the same type are repeated in a query, group them in a Fragment -- when similar but not the same fields are repeated, create multiple fragments, ex: +- عند تكرار الحقول من نفس النوع في استعلام ، قم بتجميعها في Fragment +- عند تكرار الحقول متشابهه ولكن غير متطابقة ، قم بإنشاء fragments متعددة ، على سبيل المثال: ```graphql # base fragment (mostly used in listing) @@ -425,7 +413,7 @@ fragment VoteWithPoll on Vote { --- -## The essential tools +## الأدوات الأساسية ### GraphQL web-based explorers @@ -433,8 +421,6 @@ Iterating over queries by running them in your application can be cumbersome. Fo If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). -
- ### GraphQL Linting In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. @@ -445,14 +431,12 @@ In order to keep up with the mentioned above best practices and syntactic rules, [Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: -- `@graphql-eslint/fields-on-correct-type`: is a field used on a proper type? -- `@graphql-eslint/no-unused variables`: should a given variable stay unused? -- and more! +- `@ graphql-eslint / field-on-right-type`: هل يتم استخدام الحقل على النوع المناسب؟ +- `@ graphql-eslint / no-unused variables`: هل يجب أن يبقى المتغير المعطى غير مستخدم؟ +- و اكثر! This will allow you to **catch errors without even testing queries** on the playground or running them in production! -
- ### IDE plugins **VSCode and GraphQL** @@ -460,10 +444,10 @@ This will allow you to **catch errors without even testing queries** on the play The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: - syntax highlighting -- autocomplete suggestions +- اقتراحات الإكمال التلقائي - validation against schema - snippets -- go to definition for fragments and input types +- انتقل إلى تعريف ال fragment وأنواع الإدخال If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. @@ -472,7 +456,7 @@ If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketp The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: - syntax highlighting -- autocomplete suggestions +- اقتراحات الإكمال التلقائي - validation against schema - snippets diff --git a/website/pages/ar/querying/querying-from-an-application.mdx b/website/pages/ar/querying/querying-from-an-application.mdx index f4024adccf39..e49aed0e787b 100644 --- a/website/pages/ar/querying/querying-from-an-application.mdx +++ b/website/pages/ar/querying/querying-from-an-application.mdx @@ -28,17 +28,13 @@ https://gateway.thegraph.com/api//subgraphs/id/ The Graph is providing it own GraphQL client, `graph-client` that supports unique features such as: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result - -
+- التعامل مع ال subgraph عبر السلاسل: الاستعلام من عدة subgraphs عبر استعلام واحد +- [تتبع الكتلة التلقائي](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [ترقيم الصفحات التلقائي](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- نتيجة مكتوبة بالكامل Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. -
- Let's look at how to fetch data from a subgraph with `graphql-client`. To get started, make sure to install The Graph Client CLI in your project: @@ -138,16 +134,12 @@ function App() { export default App ``` -
- **⚠️ Important notice** `graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. -
- ### عميل Apollo [Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. @@ -160,11 +152,9 @@ Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, altho - optimistic UI - local state management -
- Let's look at how to fetch data from a subgraph with Apollo client in a web project. -اولا قم بتثبيت `apollo/client@` و `graphql`: +First, install `@apollo/client` and `graphql`: ```sh npm install @apollo/client graphql @@ -203,7 +193,7 @@ client }) ``` -لاستخدام المتغيرات، يمكنك تمرير وسيطة (argument) الـ `variables` للاستعلام: +To use variables, you can pass in a `variables` argument to the query: ```javascript const tokensQuery = ` @@ -234,8 +224,6 @@ client }) ``` -
- ### URQL Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: @@ -245,11 +233,9 @@ Another option is [URQL](https://formidable.com/open-source/urql/) which is avai - Lightweight bundle (~5x lighter than Apollo Client) - Support for file uploads and offline mode -
- Let's look at how to fetch data from a subgraph with URQL in a web project. -اولا قم بتثبيت `urql` و `graphql`: +First, install `urql` and `graphql`: ```sh npm install urql graphql diff --git a/website/pages/ar/tokenomics.mdx b/website/pages/ar/tokenomics.mdx index 819c321bd725..fd5054c9e9d0 100644 --- a/website/pages/ar/tokenomics.mdx +++ b/website/pages/ar/tokenomics.mdx @@ -1,110 +1,110 @@ --- -title: Tokenomics of The Graph Network -description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token works. +title: اقتصاد التوكن (Tokenomics) لشبكة الغراف +description: تعتمد شبكة The Graphعلى نظام إقتصادي قوي للتشجيع على المشاركة. إليك كيف يعمل GRT ، التوكن الأساسي للعمل في The Graph. --- -- GRT Token Address: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) +- عنوان توكن GRT: [ 0xc944e90c64b2c07662a292be6244bdf05cda44a7 ](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- عنوان توكن GRT على Arbitrum One: [ 0x9623063377AD1B27544C965cCd7342f7EA7e88C7 ](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) The Graph is a decentralized protocol that enables easy access to blockchain data. -It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. +إنه مشابه لنموذج B2B2C ، إلا أنه مدعوم بشبكة لا مركزية من المشاركين. 
يعمل المشاركون في الشبكة معًا لتوفير البيانات للمستخدمين النهائيين مقابل مكافآت GRT. GRT هو أداة العمل الذي ينسق بين موفري البيانات والمستهلكين. تعمل GRT كأداة مساعدة للتنسيق بين موفري البيانات والمستهلكين داخل الشبكة وتحفيز المشاركين في البروتوكول على تنظيم البيانات بشكل فعال. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +باستخدام The Graph ، يمكن للمستخدمين الوصول بسهولة إلى بيانات البلوكتشين، والدفع فقط مقابل المعلومات المحددة التي يحتاجون إليها. يتم استخدام The Graph بواسطة العديد من [ التطبيقات الشائعة ](https://thegraph.com/explorer) في نظام web3 البيئي اليوم. -The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! +يقوم الغراف بفهرسة بيانات blockchain بنفس طريقة فهرسة Google للويب. في الواقع ، ربما كنت تستخدم الغراف بالفعل دون أن تدرك ذلك. إذا كنت قد شاهدت الواجهة الأمامية لـ dapp الذي يحصل على بياناته من subgraph! ، فقد استعلمت عن البيانات من ال subgraph! The Graph plays a crucial role in making blockchain data more accessible and enabling a marketplace for its exchange. -## The Roles of Network Participants +## أدوار المشاركين على الشبكة -There are four primary network participants: +هناك أربعة أدوار أساسية في الشبكة: -1. Delegators - Delegate GRT to Indexers & secure the network +1. المفوضين (Delegators) - يقومو بتفويض GRT للمفهرسين & تأمين الشبكة -2. Curators - Find the best subgraphs for Indexers +2. المنسقون (Curators) - يبحثون عن أفضل subgraphs للمفهرسين -3. Developers - Build & query subgraphs +3. المطورون - بناء& ال subgraphs للاستعلام -4. Indexers - Backbone of blockchain data +4. 
المفهرسون (Indexers) - العمود الفقري لبيانات blockchain -Fishermen and Arbitrators are also integral to the network’s success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). +الصيادون والمحكمون (Fishermen و Arbitrators) يلعبون أيضاً دورا حاسما في نجاح الشبكة من خلال مساهماتهم الأخرى، ويدعمون عمل الأدوار الأساسية للمشاركين الآخرين. لمزيد من المعلومات حول أدوار الشبكة، يُرجى [قراءة هذه المقالة](https://thegraph.com/blog/the-graph-grt-token-economics/). -![Tokenomics diagram](/img/updated-tokenomics-image.png) +![رسم بياني لاقتصاد التوكن (Tokenomics diagram)](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## المفوِّضين (يربحون GRT بشكل سلبي) Indexers are delegated GRT by Delegators increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1500 GRT in rewards annually. -There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. +هناك ضريبة تفويض بنسبة 0.5٪ يتم حرقها عندما يقوم المفوض بتفويض GRT على الشبكة. إذا قرر أحد المفوضين سحب GRT المفوضة ، فيجب عليه الانتظار لفترة فك الارتباط والتي تستغرق 28 حقبة. كل حقبة تتكون من 6646 كتلة ، مما يعني أن 28 حقبة تستغرق حوالي 26 يومًا. 
-If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. +إذا كنت تقرأ هذا ، فيمكنك أن تصبح مفوضًا الآن من خلال التوجه إلى [ صفحة المشاركين في الشبكة ](https://thegraph.com/explorer/participants/indexers) ، و تفويض GRT إلى مفهرس من اختيارك. -## Curators (Earn GRT) +## المنسِّقون (كسب GRT) Curators identify high-quality subgraphs, and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. -As of December 2022, subgraph developers are encouraged to curate their subgraph with at least 10,000 GRT. However, this number may be impacted by network activity and community participation. +اعتبارًا من ديسمبر 2022 ، يتم تشجيع مطوري الsubgrap على تنسيق الsubgrap الخاص بهم بما لا يقل عن 10000 GRT. ومع ذلك ، قد يتأثر هذا الرقم بنشاط الشبكة ومشاركة المجتمع. Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. -## Developers +## المطورين -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +يقوم المطورون ببناء الsubgraphs والاستعلام عنها لاسترداد بيانات blockchain. نظرًا لأن الsubgraph مفتوحة المصدر ، يمكن للمطورين الاستعلام عن الsubgraph الموجودة لتحميل بيانات blockchain في dapps الخاصة بهم. يدفع المطورون ثمن الاستعلامات التي يقومون بها ب GRT ، والتي يتم توزيعها على المشاركين في الشبكة. 
-### Creating a subgraph +### إنشاء subgraph -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +يمكن للمطورين [ إنشاء subgraph ](/developing/creating-a-subgraph/) لفهرسة البيانات على blockchain. الsubgraph هي تعليمات للمفهرسين حول البيانات التي يجب تقديمها للمستهلكين. -Once developers have built and tested their subgraph, they can [publish their subgraph](/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +بمجرد أن يقوم المطورون ببناء الsubgraph واختباره ، يمكنهم [ نشر الsubgraph ](/publishing/publishing-a-subgraph/) على الشبكة اللامركزية لـ The Graph. -### Querying an existing subgraph +### الاستعلام عن subgraph موجود -Once a subgraph is [published](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +بمجرد نشر الsubgraph [ ](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) في الشبكة اللامركزية لـ The Graph ، يمكن لأي شخص إنشاء مفتاح API وإضافة GRT إلى رصيد الفواتير الخاص بهم ، والاستعلام عن الsubgraph. -Subgraphs are [queried using GraphQL](/querying/querying-the-graph/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. +يتم [ الاستعلام عن الSubgraph باستخدام GraphQL ](/querying/querying-the-graph/) ، ويتم دفع رسوم الاستعلام باستخدام GRT في [ Subgraph Studio ](https://thegraph.com/studio/). يتم توزيع رسوم الاستعلام على المشاركين في الشبكة بناءً على مساهماتهم في البروتوكول. -1% of the query fees paid to the network are burned. +يتم حرق 1٪ من رسوم الاستعلام المدفوعة للشبكة. -## Indexers (Earn GRT) +## المفهرسون (كسب GRT) -Indexers are the backbone of The Graph. 
They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +المفهرسين هم العمود الفقري لThe Graph. يعملون على أجهزة وبرامج مستقلة تشغل الشبكة اللامركزية لـ The Graph. يقدم المفهرسين البيانات للمستهلكين بناءً على تعليمات من الsubgraphs. -Indexers can earn GRT rewards in two ways: +يمكن للمفهرسين ربح مكافآت GRT بطريقتين: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. رسوم الاستعلام: المطورون أو المستخدمون يدفعون GRT مقابل استعلامات الsubgraph. يتم إيداع رسوم الاستعلام في حوض الخصم وتوزيعها على المفهرسين. -2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. +2. مكافآت الفهرسة: يتم توزيع 3% من الإصدار السنوي على المفهرسين بناءً على عدد الsubgraphs التي يقومون بفهرستها. هذه المكافآت تشجع المفهرسين على فهرسة الsubgraphs ، أحيانًا قبل البدء بفرض الرسوم على الاستعلامات ،يقوم المفهرسون بتجميع وتقديم أدلة فهرسة (POIs) للتحقق من دقة فهرسة البيانات التي قاموا بفهرستها. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +كل subgraph يخصص له جزء من إجمالي إصدار التوكن للشبكة بناءً على مقدار إشارة تنسيق الsubgraph. هذا المقدار يتم منحه للمفهرسين وفقا لحصصهم المخصصة على الـ subgraph. -In order to run an indexing node, Indexers must stake 100,000 GRT or more with the network. Indexers are incentivized to stake GRT in proportion to the amount of queries they serve. +من أجل تشغيل عقدة الفهرسة ، يجب أن يشارك المفهرسون برهن 100،000 GRT أو أكثر مع الشبكة. 
يتم تشجيع المفهرسين برهن GRT تتناسب مع عدد الاستعلامات التي يقدمونها. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial stake), they will not be able to use the additional GRT from Delegators until they increase their stake in the network. +يمكن للمفهرسين زيادة تخصيصاتهم من GRT على الsubgraph عن طريق قبول تفويض GRT من المفوضين ، ويمكنهم قبول ما يصل إلى 16 ضعف من رهانهم أو"حصتهم" الأولي. إذا أصبح المفهرس "مفوضا بشكل زائد" (أي أكثر من 16 ضعف من حصته الأولية) ، فلن يتمكن من استخدام GRT الإضافي من المفوضين حتى يزيد المفهرس حصته في الشبكة. -The amount of rewards an Indexer receives can vary based on the initial stake, accepted delegation, quality of service, and many more factors. The following chart is publicly available data from an active Indexer on The Graph's decentralized network. +يمكن أن يختلف مقدار المكافآت التي يتلقاها المفهرس بناءً على الحصة الأولية والتفويض المقبول وجودة الخدمة والعديد من العوامل الأخرى. الرسم البياني التالي هي بيانات عامة لمفهرس نشط في شبكة TheGraph اللامركزية. ### The Indexer stake & reward of allnodes-com.eth -![Indexing stake and rewards](/img/indexing-stake-and-income.png) +![حصص الفهرسة والمكافآت](/img/indexing-stake-and-income.png) -This data is from February 2021 to September 2022. +هذه البيانات من فبراير 2021 إلى سبتمبر 2022. -> Please note, this will improve when the [Arbitrum migration](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) is complete, making gas costs a significantly lower burden for participating on the network. +> يرجى ملاحظة أن هذا سيتحسن عند اكتمال عملية الترحيل إلى [Arbitrum](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551)، مما يجعل تكاليف الغاز أقل بشكل كبير ويجعلها أقل عبئا للمشاركة في الشبكة. 
-## Token Supply: Burning & Issuance +## معروض التوكن: الحرق & الإصدار -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +يبلغ المعروض الأولي للتوكن 10 مليار GRT ، مع هدف إصدار جديد بنسبة 3٪ سنويًا لمكافأة المفهرسين الذين يخصصون حصصهم على الsubgraphs. هذا يعني أن إجمالي المعروض من توكن GRT سيزيد بنسبة 3٪ كل عام حيث يتم إصدار توكن جديد للمفهرسين تكريما لمساهمتهم في الشبكة. The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. ![مجموع عملة القراف المحروقة](/img/total-burned-grt.jpeg) -In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. +بالإضافة إلى أنشطة الحرق الدورية المذكورة، يتوفر في توكن GRT آلية القطع لمعاقبة المفهرسين المسؤولين عن سلوك ضار أو غير مسؤول. وفي حالة إعطائهم عقوبة القطع، يتم حرق 50% من مكافآتهم الخاصة بالفهرسة في فترة زمنية محددة (بينما يذهب النصف الآخر للصياد"fisherman")، ويتم خفض حصتهم الشخصية بنسبة 2.5%، ويتم حرق نصف هذا المبلغ. 
ويساعد ذلك على ضمان أن المفهرسين لديهم حافز قوي للعمل بما يخدم مصالح الشبكة والمساهمة في أمنها واستقرارها. -## Improving the Protocol +## تحسين البروتوكول -The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). +تتطور شبكة Graph باستمرار ويتم إجراء تحسينات على التصميم الاقتصادي للبروتوكول باستمرار لتوفير أفضل تجربة لجميع المشاركين في الشبكة. يشرف مجلس The Graph على تغييرات البروتوكول ويتم تشجيع أعضاء المجتمع على المشاركة. شارك في تحسينات البروتوكول في [ منتدى The Graph ](https://forum.thegraph.com/). diff --git a/website/pages/cs/_meta.js b/website/pages/cs/_meta.js new file mode 100644 index 000000000000..ac570f79abfc --- /dev/null +++ b/website/pages/cs/_meta.js @@ -0,0 +1,5 @@ +import meta from '../en/_meta.js' + +export default { + ...structuredClone(meta), +} diff --git a/website/pages/cs/about.mdx b/website/pages/cs/about.mdx new file mode 100644 index 000000000000..c1f7c886900f --- /dev/null +++ b/website/pages/cs/about.mdx @@ -0,0 +1,47 @@ +--- +title: About The Graph +--- + +This page will explain what The Graph is and how you can get started. + +## What is The Graph? + +The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. + +Projects with complex smart contracts like [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it really difficult to read anything other than basic data directly from the blockchain. 
+ +In the case of Bored Ape Yacht Club, we can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) like getting the owner of a certain Ape, getting the content URI of an Ape based on their ID, or the total supply, as these read operations are programmed directly into the smart contract, but more advanced real-world queries and operations like aggregation, search, relationships, and non-trivial filtering are not possible. For example, if we wanted to query for apes that are owned by a certain address, and filter by one of its characteristics, we would not be able to get that information by interacting directly with the contract itself. + +To get this data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. Even for these types of relatively simple questions, it would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer. + +You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. + +**Indexing blockchain data is really, really hard.** + +Blockchain properties like finality, chain reorganizations, or uncled blocks complicate this process further, and make it not just time consuming but conceptually hard to retrieve correct query results from blockchain data. + +The Graph solves this with a decentralized protocol that indexes and enables the performant and efficient querying of blockchain data. 
These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. Today, there is a hosted service as well as a decentralized protocol with the same capabilities. Both are backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node). + +## How The Graph Works + +The Graph learns what and how to index Ethereum data based on subgraph descriptions, known as the subgraph manifest. The subgraph description defines the smart contracts of interest for a subgraph, the events in those contracts to pay attention to, and how to map event data to data that The Graph will store in its database. + +Once you have written a `subgraph manifest`, you use the Graph CLI to store the definition in IPFS and tell the indexer to start indexing data for that subgraph. + +This diagram gives more detail about the flow of data once a subgraph manifest has been deployed, dealing with Ethereum transactions: + +![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) + +The flow follows these steps: + +1. A dapp adds data to Ethereum through a transaction on a smart contract. +2. The smart contract emits one or more events while processing the transaction. +3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. +4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. +5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. 
The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. + +## Next Steps + +In the following sections we will go into more detail on how to define subgraphs, how to deploy them, and how to query data from the indexes that Graph Node builds. + +Before you start writing your own subgraph, you might want to have a look at the Graph Explorer and explore some of the subgraphs that have already been deployed. The page for each subgraph contains a playground that lets you query that subgraph's data with GraphQL. diff --git a/website/pages/cs/arbitrum/_meta.js b/website/pages/cs/arbitrum/_meta.js new file mode 100644 index 000000000000..321fe93849be --- /dev/null +++ b/website/pages/cs/arbitrum/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/arbitrum/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/arbitrum/arbitrum-faq.mdx b/website/pages/cs/arbitrum/arbitrum-faq.mdx new file mode 100644 index 000000000000..848f6eed7ab4 --- /dev/null +++ b/website/pages/cs/arbitrum/arbitrum-faq.mdx @@ -0,0 +1,78 @@ +--- +title: Arbitrum FAQ +--- + +Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. + +## Why is The Graph implementing an L2 Solution? + +By scaling The Graph on L2, network participants can expect: + +- Upwards of 26x savings on gas fees + +- Faster transaction speed + +- Security inherited from Ethereum + +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. 
+ +The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. + +## What do I need to do to use The Graph on L2? + +Users bridge their GRT and ETH  using one of the following methods: + +- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [TransferTo](https://transferto.xyz/swap) +- [Connext Bridge](https://bridge.connext.network/) +- [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) + +To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. + +![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) + +## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? + +There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. + +Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. + +As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. + +## If I would like to participate in the network on L2, what should I do? + +Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/vtvv7FP). + +## Are there any risks associated with scaling the network to L2? + +All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). 
+ +Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). + +## Will existing subgraphs on Ethereum continue to work? + +Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. + +## Will GRT have a new smart contract deployed on Arbitrum? + +Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. + +## Billing on Arbitrum FAQs + +## What do I need to do about the GRT in my billing balance? + +Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. + +## How do I know my funds have migrated securely to Arbitrum? + +All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). + +## How do I know the Arbitrum bridge is secure? + +The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. + +## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? + +Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. + +Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. 
diff --git a/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx new file mode 100644 index 000000000000..581e2902576c --- /dev/null +++ b/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx @@ -0,0 +1,315 @@ +--- +title: L2 Transfer Tools FAQ +--- + +> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. + +## What are L2 Transfer Tools? + +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +## Can I use the same wallet I use on Ethereum mainnet? + +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. + +## Subgraph Transfer + +## How do I transfer my subgraph? + +To transfer your subgraph, you will need to complete the following steps: + +1. Initiate the transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm subgraph transfer on Arbitrum\* + +4. Finish publishing subgraph on Arbitrum + +5. 
Update Query URL (recommended) + +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## Where should I initiate my transfer from? + +You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer), or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. + +## How long do I need to wait until my subgraph is transferred? + +The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. + +## Will my subgraph still be discoverable after I transfer it to L2? + +Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network. After the transfer, the L1 subgraph will appear as deprecated. + +## Does my subgraph need to be published to transfer it? + +To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower.
If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. + +## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? + +After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. + +## After I transfer, do I also need to re-publish on Arbitrum? + +After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. + +## Will there be a down-time to my endpoint while re-publishing? + +There should be no down time when using the transfer tool to move your subgraph to L2. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. + +## Is publishing and versioning the same on L2 as Ethereum mainnet? + +Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. + +## Will my subgraph's curation move with my subgraph? + +If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph.
+ +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. + +## Can I move my subgraph back to Ethereum mainnet after I transfer? + +Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. + +## Why do I need bridged ETH to complete my transfer? + +Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. + +## Curation Signal + +## How do I transfer my curation? + +To transfer your curation, you will need to complete the following steps: + +1. Initiate signal transfer on Ethereum mainnet + +2. Specify an L2 Curator address\* + +3. Wait 20 minutes for confirmation + +\*If necessary - i.e. you are using a contract address. + +## How will I know if the subgraph I curated has moved to L2? + +When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. + +## What if I do not wish to move my curation to L2? + +When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. + +## How do I know my curation successfully transferred? + +Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. + +## Can I transfer my curation on more than one subgraph at a time? + +There is no bulk transfer option at this time. 
+ +## Indexer Stake + +## How do I transfer my stake to Arbitrum? + +To transfer your stake, you will need to complete the following steps: + +1. Initiate stake transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm stake transfer on Arbitrum + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## Will all of my stake transfer? + +You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. + +If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. + +Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. + +## How much time do I have to confirm my stake transfer to Arbitrum? + +\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. + +## What if I have open allocations? + +If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. 
+ +## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? + +No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. + +## How long will it take to transfer my stake? + +It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. + +## Do I have to index on Arbitrum before I transfer my stake? + +You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. + +## Can Delegators move their delegation before I move my indexing stake? + +No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. + +## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? + +Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +## Delegation + +## How do I transfer my delegation? + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? + +If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. + +## What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +## Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +## What if I can't find the Indexer I'm delegating to on L2? + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. 
+ +## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +## Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +## How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +## Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +## Vesting Contract Transfer + +## How do I transfer my vesting contract? 
+ +To transfer your vesting, you will need to complete the following steps: + +1. Initiate the vesting transfer on Ethereum mainnet + +2. Wait 20 minutes for confirmation + +3. Confirm vesting transfer on Arbitrum + +## How do I transfer my vesting contract if I am only partially vested? + +1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) + +2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set your L2 beneficiary address. + +3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. + +4. Withdraw any remaining ETH from the transfer tool contract + +## How do I transfer my vesting contract if I am fully vested? + +For those that are fully vested, the process is similar: + +1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) + +2. Set your L2 address with a call to the transfer tool contract + +3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. + +4. Withdraw any remaining ETH from the transfer tool contract + +## Can I transfer my vesting contract to Arbitrum? + +You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). + +When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. + +The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. 
+ +Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. + +If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + +## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? + +Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. + +## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? + +Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. + +## Can I specify a different beneficiary for my vesting contract on L2? + +Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. 
Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. + +If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. + +## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? + +Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + +This allows you to transfer your stake or delegation to any L2 address. + +## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? + +These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. + +To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: + +1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) + +2. Revoke protocol access to the vesting contract (needed for the next step) + +3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) + +4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet + +5. Wait 20 minutes for confirmation + +6. Confirm the balance transfer on L2 + +\*If necessary - i.e. you are using a contract address. + +\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). + +## Can I move my vesting contract back to L1? + +There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. + +## Why do I need to move my vesting contract to begin with? + +You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. + +## What happens if I try to cash out my contract when it is only partially vested? Is this possible? + +This is not a possibility. You can move funds back to L1 and withdraw them there. + +## What if I don't want to move my vesting contract to L2? + +You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx new file mode 100644 index 000000000000..28c6b7fc277e --- /dev/null +++ b/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx @@ -0,0 +1,165 @@ +--- +title: L2 Transfer Tools Guide +--- + +> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. 
+ +The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. + +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. + +## How to transfer your subgraph to Arbitrum (L2) + +## Benefits of transferring your subgraphs + +The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. + +When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. + +## Understanding what happens with signal, your L1 subgraph and query URLs + +Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. 
The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. + +When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. + +Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. + +As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. + +Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. + +## Choosing your L2 wallet + +When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. 
+ +When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. + +If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. + +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. + +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** + +## Preparing for the transfer: bridging some ETH + +Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. 
+ +You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved. + +## Finding the subgraph Transfer Tool + +You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: + +![transfer tool](/img/L2-transfer-tool1.png) + +It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: + +![Transferring to L2](/img/transferToL2.png) + +Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. + +## Step 1: Starting the transfer + +Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). + +Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). + +After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). 
+ +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. + +![Start the transfer to L2](/img/startTransferL2.png) + +## Step 2: Waiting for the subgraph to get to L2 + +After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). + +Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. + +![Wait screen](/img/screenshotOfWaitScreenL2.png) + +## Step 3: Confirming the transfer + +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. + +If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. + +![Confirm the transfer to L2](/img/confirmTransferToL2.png) + +## Step 4: Finishing the transfer on L2 + +At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." 
+ +![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) + +![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) + +This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. + +## Step 5: Updating the query URL + +Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be: + +`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` + +Note that the subgraph ID on Arbitrum will be different from the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. + +## How to transfer your curation to Arbitrum (L2) + +## Understanding what happens to curation on subgraph transfers to L2 + +When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. + +This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. 
+ +A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. + +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. + +## Choosing your L2 wallet + +If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. + +If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. + +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. + +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** + +## Sending curation to L2: Step 1 + +Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. 
You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. + +If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. + +When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. + +![Transfer signal](/img/transferSignalL2TransferTools.png) + +After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. + +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. + +## Sending curation to L2: step 2 + +Starting the transfer: + +![Send signal to L2](/img/sendingCurationToL2Step2First.png) + +After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). 
+ +Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. + +![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) + +## Sending curation to L2: step 3 + +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. + +If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. + +![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) + +## Withdrawing your curation on L1 + +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. diff --git a/website/pages/cs/billing.mdx b/website/pages/cs/billing.mdx new file mode 100644 index 000000000000..3c21e5de1cdc --- /dev/null +++ b/website/pages/cs/billing.mdx @@ -0,0 +1,158 @@ +--- +title: Billing +--- + +> Invoices are generated on a weekly basis. + +There are two options for paying for your query fees: + +- [Paying with fiat currency with Banxa](#billing-with-banxa) +- [Paying with crypto wallet](#billing-on-arbitrum) + +## Billing with Banxa + +Banxa enables you to bypass the need for an exchange and pay for your query fees using the fiat currency of your choice. The fiat currency will be converted to GRT, added to your account balance on the billing contract, and used to pay for queries associated with your API keys. 
+ +There may be KYC requirements depending on the regulations in your country. For more information about KYC, please visit [Banxa's FAQ page](https://docs.banxa.com/docs/faqs). + +You can learn more about Banxa by reading their [documentation](https://docs.banxa.com/docs). + +### Paying for query fees with Banxa + +1. Select “Pay with Card” option in [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit). +2. Enter the amount of GRT to be added to your account balance. +3. Click the 'Continue with Banxa' button. +4. Enter necessary banking information on Banxa including payment method & fiat currency of choice. +5. Finish the transaction. + +It may take up to 10 minutes to complete the transaction. Once the transaction is confirmed, the purchased GRT will automatically be added to your account balance on Arbitrum. + +## Billing on Arbitrum + +While The Graph protocol operates on Ethereum Mainnet, [the billing contract](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) lives on the [Arbitrum](https://arbitrum.io/) network to reduce transaction times and cost. You'll be required to pay the query fees generated from your API keys. Using the billing contract, you'll be able to: + +- Add and withdraw GRT from your account balance. +- Keep track of your balances based on how much GRT you have added to your account balance, how much you have removed, and your invoices. +- Automatically pay invoices based on query fees generated, as long as there is enough GRT in your account balance. + +### Adding GRT using a crypto wallet + +> This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). + +2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. 
Select your wallet and click on "Connect". + +3. Click the 'Add GRT' button at the center of the page. A side panel will appear. + +4. Enter the amount of GRT you want to add to your account balance. You can also select the maximum amount of GRT you want to add to your account balance by clicking on the "Max" button. + +5. Click 'Allow GRT Access' to allow the Subgraph Studio to access your GRT. Sign the associated transaction in your wallet. This will not cost any gas. + +6. Click 'Add GRT to account balance' to add the GRT to your account balance. Sign the associated transaction in your wallet. This will cost gas. + +7. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. + +### Withdrawing GRT using a crypto wallet + +> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on the Arbitrum network. + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). + +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". + +3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. + +4. Enter the amount of GRT you would like to withdraw. + +5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet. + +6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet. + +### Adding GRT using a multisig wallet + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). + +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". 
If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. + +3. Click the 'Add GRT' button at the center of the page. A side panel will appear. + +4. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. + +### Withdrawing GRT using a multisig wallet + +> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on Ethereum mainnet. + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). + +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". + +3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. + +4. Enter the amount of GRT you would like to withdraw. Specify the receiving wallet which will receive the GRT from this transaction. The GRT will be sent to the receiving wallet on Arbitrum. + +5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. + +6. Once the transaction is confirmed, you'll see the GRT added to your Arbitrum wallet within an hour. + +## Getting GRT + +This section will show you how to get GRT to pay for query fees. + +### Coinbase + +This will be a step by step guide for purchasing GRT on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. +4. 
Select the currency you want to purchase. Select GRT. +5. Select the payment method. Select your preferred payment method. +6. Select the amount of GRT you want to purchase. +7. Review your purchase. Review your purchase and click "Buy GRT". +8. Confirm your purchase. Confirm your purchase and you will have successfully purchased GRT. +9. You can transfer the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the GRT to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the GRT account. + - Enter the amount of GRT you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. -Please note that for larger purchase amounts, Coinbase may require you to wait 7-10 days before transferring the full amount to a crypto wallet. + +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing GRT on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner. +4. You will be taken to a page where you can select the currency you want to purchase. Select GRT. +5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more. +6. Select the amount of GRT you want to purchase. +7. Review your purchase and click "Buy GRT". +8. 
Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet. +9. You can withdraw the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). +    - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. +    - Click on the "wallet" button, click withdraw, and select GRT. +    - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. +    - Click "Continue" and confirm your transaction. + +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + +### Uniswap + +This is how you can purchase GRT on Uniswap. + +1. Go to [Uniswap](https://app.uniswap.org/#/swap) and connect your wallet. +2. Select the token you want to swap from. Select ETH. +3. Select the token you want to swap to. Select GRT. +   - Make sure you're swapping for the correct token. The GRT smart contract address is: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` +4. Enter the amount of ETH you want to swap. +5. Click "Swap". +6. Confirm the transaction in your wallet and wait for the transaction to process. + +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Arbitrum Bridge + +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). 
diff --git a/website/pages/cs/cookbook/_meta.js b/website/pages/cs/cookbook/_meta.js new file mode 100644 index 000000000000..7fc5602ab4d2 --- /dev/null +++ b/website/pages/cs/cookbook/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/cookbook/_meta.js' + +export default { +  ...meta, +} diff --git a/website/pages/cs/cookbook/arweave.mdx b/website/pages/cs/cookbook/arweave.mdx new file mode 100644 index 000000000000..319ad2f760a7 --- /dev/null +++ b/website/pages/cs/cookbook/arweave.mdx @@ -0,0 +1,239 @@ +--- +title: Building Subgraphs on Arweave +--- + +> Arweave support in Graph Node and on the Hosted Service is in beta: please reach out to us on [Discord](https://discord.gg/rC8rBuRtbH) with any questions about building Arweave subgraphs! + +In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. + +## What is Arweave? + +The Arweave protocol allows developers to store data permanently, and that is the main difference between Arweave and IPFS: IPFS lacks this feature — permanence — while files stored on Arweave can't be changed or deleted. + +Arweave already has built numerous libraries for integrating the protocol in a number of different programming languages. For more information you can check: + +- [Arwiki](https://arwiki.wiki/#/en/main) +- [Arweave Resources](https://www.arweave.org/build) + +## What are Arweave Subgraphs? + +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). + +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on the Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions); it is not indexing the stored files yet. 
+ +## Building an Arweave Subgraph + +To be able to build and deploy Arweave Subgraphs, you need two packages: + +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is a library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. + +## Subgraph's components + +There are three components of a subgraph: + +### 1. Manifest - `subgraph.yaml` + +Defines the data sources of interest, and how they should be processed. Arweave is a new kind of data source. + +### 2. Schema - `schema.graphql` + +Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. + +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). + +### 3. AssemblyScript Mappings - `mapping.ts` + +This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based on the schema you have listed. + +During subgraph development there are two key commands: + +``` +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +``` + +## Subgraph Manifest Definition + +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example subgraph manifest for an Arweave subgraph: + +```yaml +specVersion: 0.0.5 +description: Arweave Blocks Indexing +schema: + file: ./schema.graphql # link to the schema file +dataSources: + - kind: arweave + name: arweave-blocks + network: arweave-mainnet # The Graph only supports Arweave Mainnet + source: + owner: 'ID-OF-AN-OWNER' # The public key of an Arweave wallet + startBlock: 0 # set this to 0 to start indexing from chain genesis + mapping: + apiVersion: 0.0.5 + language: wasm/assemblyscript + file: ./src/blocks.ts # link to the file with the Assemblyscript mappings + entities: + - Block + - Transaction + blockHandlers: + - handler: handleBlock # the function name in the mapping file + transactionHandlers: + - handler: handleTx # the function name in the mapping file +``` + +- Arweave subgraphs introduce a new kind of data source (`arweave`) +- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet + +Arweave data sources support two types of handlers: + +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` + +> The source.owner can be the owner's address, or their Public Key. + +> Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. + +> Note: [Bundlr](https://bundlr.network/) transactions are not supported yet. + +## Schema Definition + +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). + +## AssemblyScript Mappings + +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). + +Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). + +```tsx +class Block { +  timestamp: u64 +  lastRetarget: u64 +  height: u64 +  indepHash: Bytes +  nonce: Bytes +  previousBlock: Bytes +  diff: Bytes +  hash: Bytes +  txRoot: Bytes +  txs: Bytes[] +  walletList: Bytes +  rewardAddr: Bytes +  tags: Tag[] +  rewardPool: Bytes +  weaveSize: Bytes +  blockSize: Bytes +  cumulativeDiff: Bytes +  hashListMerkle: Bytes +  poa: ProofOfAccess +} + +class Transaction { +  format: u32 +  id: Bytes +  lastTx: Bytes +  owner: Bytes +  tags: Tag[] +  target: Bytes +  quantity: Bytes +  data: Bytes +  dataSize: Bytes +  dataRoot: Bytes +  signature: Bytes +  reward: Bytes +} +``` + +Block handlers receive a `Block`, while transactions receive a `Transaction`. + +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). + +## Deploying an Arweave Subgraph on the Hosted Service + +Once your subgraph has been created on the Hosted Service dashboard, you can deploy by using the `graph deploy` CLI command. + +```bash +graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token  +``` + +## Querying an Arweave Subgraph + +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. 
+ +## Example Subgraphs + +Here is an example subgraph for reference: + +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) + +## FAQ + +### Can a subgraph index Arweave and other chains? + +No, a subgraph can only support data sources from one chain/network. + +### Can I index the stored files on Arweave? + +Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). + +### Can I identify Bundlr bundles in my subgraph? + +This is not currently supported. + +### How can I filter transactions to a specific account? + +The source.owner can be the user's public key or account address. + +### What is the current encryption format? + +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
+ +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: + +``` +const base64Alphabet = [ + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" +]; + +const base64UrlAlphabet = [ + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" +]; + +function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { + let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; +} +``` diff --git a/website/pages/cs/cookbook/base-testnet.mdx b/website/pages/cs/cookbook/base-testnet.mdx new file mode 100644 index 000000000000..b1e3a4fc7c6d --- /dev/null +++ b/website/pages/cs/cookbook/base-testnet.mdx @@ 
-0,0 +1,112 @@ +--- +title: Building Subgraphs on Base +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph on Base testnet. + +What you'll need: + +- A Base testnet contract address +- A crypto wallet (e.g. MetaMask or Coinbase Wallet) + +## Subgraph Studio + +### 1. Install the Graph CLI + +The Graph CLI (>=v0.41.0) is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +```sh +# NPM +npm install -g @graphprotocol/graph-cli + +# Yarn +yarn global add @graphprotocol/graph-cli +``` + +### 2. Create your subgraph in the Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your crypto wallet. + +Once connected, click "Create a Subgraph" and enter a name for your subgraph. + +Select "Base (testnet)" as the indexed blockchain and click Create Subgraph. + +### 3. Initialize your Subgraph + +> You can find specific commands for your subgraph in the Subgraph Studio. + +Make sure that the graph-cli is updated to latest (above 0.41.0) + +```sh +graph --version +``` + +Initialize your subgraph from an existing contract. + +```sh +graph init --studio +``` + +Your subgraph slug is an identifier for your subgraph. The CLI tool will walk you through the steps for creating a subgraph, including: + +- Protocol: ethereum +- Subgraph slug: `` +- Directory to create the subgraph in: `` +- Ethereum network: base-testnet \_ Contract address: `` +- Start block (optional) +- Contract name: `` +- Yes/no to indexing events (yes means your subgraph will be bootstrapped with entities in the schema and simple mappings for emitted events) + +### 3. Write your Subgraph + +> If emitted events are the only thing you want to index, then no additional work is required, and you can skip to the next step. + +The previous command creates a scaffold subgraph that you can use as a starting point for building your subgraph. 
When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. Make sure to add `base-testnet` as the network name in the manifest file to deploy your subgraph on Base testnet. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +If you want to index additional data, you will need to extend the manifest, schema and mappings. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +### 4. Deploy to the Subgraph Studio + +Before you can deploy your subgraph, you will need to authenticate with the Subgraph Studio. You can do this by running the following command: + +Authenticate the subgraph on studio + +``` +graph auth --studio  +``` + +Next, enter your subgraph's directory. + +``` + cd  +``` + +Build your subgraph with the following command: + +```` +``` +graph codegen && graph build +``` +```` + +Finally, you can deploy your subgraph using this command: + +```` +``` +graph deploy --studio  +``` +```` + +### 5. Query your subgraph + +Once your subgraph is deployed, you can query it from your dapp using the `Development Query URL` in the Subgraph Studio. + +Note - Studio API is rate-limited. Hence it should preferably be used for development and testing. + +To learn more about querying data from your subgraph, see the [Querying a Subgraph](/querying/querying-the-graph) page. 
diff --git a/website/pages/cs/cookbook/cosmos.mdx b/website/pages/cs/cookbook/cosmos.mdx new file mode 100644 index 000000000000..ef21e4bc0855 --- /dev/null +++ b/website/pages/cs/cookbook/cosmos.mdx @@ -0,0 +1,259 @@ +--- +title: Building Subgraphs on Cosmos +--- + +This guide is an introduction on building subgraphs indexing [Cosmos](https://docs.cosmos.network/) based blockchains. + +## What are Cosmos subgraphs? + +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. + +There are four types of handlers supported in Cosmos subgraphs: + +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. + +Based on the [official Cosmos documentation](https://docs.cosmos.network/): + +> [Events](https://docs.cosmos.network/main/core/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. + +> [Transactions](https://docs.cosmos.network/main/core/transactions) are objects created by end-users to trigger state changes in the application. + +> [Messages](https://docs.cosmos.network/main/core/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. + +Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. 
+ +## Building a Cosmos subgraph + +### Subgraph Dependencies + +[graph-cli](https://github.com/graphprotocol/graph-cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. + +[graph-ts](https://github.com/graphprotocol/graph-ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. + +### Subgraph Main Components + +There are three key parts when it comes to defining a subgraph: + +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. + +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. + +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. + +### Subgraph Manifest Definition + +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: + +```yaml +specVersion: 0.0.5 +description: Cosmos Subgraph Example +schema: + file: ./schema.graphql # link to the schema file +dataSources: + - kind: cosmos + name: CosmosHub + network: cosmoshub-4 # This will change for each cosmos-based blockchain. In this case, the example uses the Cosmos Hub mainnet. 
+ source: + startBlock: 0 # Required for Cosmos, set this to 0 to start indexing from chain genesis + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + blockHandlers: + - handler: handleNewBlock # the function name in the mapping file + eventHandlers: + - event: rewards # the type of the event that will be handled + handler: handleReward # the function name in the mapping file + transactionHandlers: + - handler: handleTransaction # the function name in the mapping file + messageHandlers: + - message: /cosmos.staking.v1beta1.MsgDelegate # the type of a message + handler: handleMsgDelegate # the function name in the mapping file + file: ./src/mapping.ts # link to the file with the Assemblyscript mappings +``` + +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. + +### Schema Definition + +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graph-ql-schema). + +### AssemblyScript Mappings + +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). + +Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). 
+ +```tsx +class Block { + header: Header + evidence: EvidenceList + resultBeginBlock: ResponseBeginBlock + resultEndBlock: ResponseEndBlock + transactions: Array + validatorUpdates: Array +} + +class EventData { + event: Event + block: HeaderOnlyBlock + tx: TransactionContext +} + +class TransactionData { + tx: TxResult + block: HeaderOnlyBlock +} + +class MessageData { + message: Any + block: HeaderOnlyBlock + tx: TransactionContext +} + +class TransactionContext { + hash: Bytes + index: u32 + code: u32 + gasWanted: i64 + gasUsed: i64 +} + +class HeaderOnlyBlock { + header: Header +} + +class Header { + version: Consensus + chainId: string + height: u64 + time: Timestamp + lastBlockId: BlockID + lastCommitHash: Bytes + dataHash: Bytes + validatorsHash: Bytes + nextValidatorsHash: Bytes + consensusHash: Bytes + appHash: Bytes + lastResultsHash: Bytes + evidenceHash: Bytes + proposerAddress: Bytes + hash: Bytes +} + +class TxResult { + height: u64 + index: u32 + tx: Tx + result: ResponseDeliverTx + hash: Bytes +} + +class Event { + eventType: string + attributes: Array +} + +class Any { + typeUrl: string + value: Bytes +} +``` + +Each handler type comes with its own data structure that is passed as an argument to a mapping function. + +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. + +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). 
+ +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). + +### Message decoding + +It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://developers.google.com/protocol-buffers/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. + +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). + +## Creating and building a Cosmos subgraph + +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: + +```bash +$ graph codegen +``` + +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. 
It can be done using the `build` CLI command: + +```bash +$ graph build +``` + +## Deploying a Cosmos subgraph + +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: + +**Hosted Service** + +```bash +graph create account/subgraph-name --product hosted-service +``` + +```bash +graph deploy account/subgraph-name --product hosted-service +``` + +**Local Graph Node (based on default configuration):** + +```bash +graph create subgraph-name --node http://localhost:8020 +``` + +```bash +graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 +``` + +## Querying a Cosmos subgraph + +The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. + +## Supported Cosmos Blockchains + +### Cosmos Hub + +#### What is Cosmos Hub? + +The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. + +#### Networks + +Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. + +### Osmosis + +> Osmosis support in Graph Node and on the Hosted Service is in beta: please contact the graph team with any questions about building Osmosis subgraphs! + +#### What is Osmosis? + +[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. + +#### Networks + +Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. + +## Example Subgraphs + +Here are some example subgraphs for reference: + +[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) + +[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) + +[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) + +[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) diff --git a/website/pages/cs/cookbook/grafting.mdx b/website/pages/cs/cookbook/grafting.mdx new file mode 100644 index 000000000000..54ad7a0eaff8 --- /dev/null +++ b/website/pages/cs/cookbook/grafting.mdx @@ -0,0 +1,186 @@ +--- +title: Replace a Contract and Keep its History With Grafting +--- + +In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. + +## What is Grafting? + +Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. + +The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: + +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented + +For more information, you can check: + +- [Grafting](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) + +In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. + +## Building an Existing Subgraph + +Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: + +- [Subgraph example repo](https://github.com/t-proctor/grafting-tutorial) + +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). + +## Subgraph Manifest Definition + +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example subgraph manifest that you will use: + +```yaml +specVersion: 0.0.4 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum + name: Lock + network: goerli + source: + address: '0x4Ed995e775D3629b0566D2279f058729Ae6EA493' + abi: Lock + startBlock: 7674603 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts +``` + +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract +- The network should correspond to a indexed network being queried. Since we're running on Goerli testnet, the network is `goerli` +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. + +## Grafting Manifest Definition + +Grafting requires adding two new items to the original subgraph manifest: + +```yaml +--- +features: + - grafting # feature name +graft: + base: Qm... # subgraph ID of base subgraph + block: 1502122 # block number +``` + +- `features:` is a list of all used [feature names](developing/creating-a-subgraph/#experimental-features). +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. + +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting + +## Deploying the Base Subgraph + +1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-example` +2. 
Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo +3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground + +```graphql +{ + withdrawals(first: 5) { + id + amount + when + } +} +``` + +It returns something like this: + +``` +{ + "data": { + "withdrawals": [ + { + "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5", + "amount": "0", + "when": "1664367528" + }, + { + "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3", + "amount": "0", + "when": "1664367648" + } + ] + } +} +``` + +Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. + +## Deploying the Grafting Subgraph + +The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. + +1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in The Graph Studio UI. +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo +4. Once finished, verify the subgraph is indexing properly. 
If you run the following command in The Graph Playground + +```graphql +{ + withdrawals(first: 5) { + id + amount + when + } +} +``` + +It should return the following: + +``` +{ + "data": { + "withdrawals": [ + { + "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5", + "amount": "0", + "when": "1664367528" + }, + { + "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3", + "amount": "0", + "when": "1664367648" + }, + { + "id": "0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3-22", + "amount": "0", + "when": "1664371512" + } + ] + } +} +``` + +You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) and [Event 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). The new contract emitted one `Withdrawal` after, [Event 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. + +Congrats! You have succesfully grafted a subgraph onto another subgraph. 
+ +## Additional Resources + +If you want more experience with grafting, here's a few examples for popular contracts: + +- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/templates/curve.template.yaml) +- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) +- [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3/protocols/uniswap-v3/config/templates/uniswap.v3.template.yaml), + +To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](developing/creating-a-subgraph/#data-source-templates) can achieve similar results + +> Note: A lot of material from this article was taken from the previously published [Arweave article](/cookbook/arweave/) diff --git a/website/pages/cs/cookbook/near.mdx b/website/pages/cs/cookbook/near.mdx new file mode 100644 index 000000000000..6c2f253187ca --- /dev/null +++ b/website/pages/cs/cookbook/near.mdx @@ -0,0 +1,284 @@ +--- +title: Building Subgraphs on NEAR +--- + +> NEAR support in Graph Node and on the Hosted Service is in beta: please contact near@thegraph.com with any questions about building NEAR subgraphs! + +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). + +## What is NEAR? + +[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/docs/concepts/new-to-near) for more information. + +## What are NEAR subgraphs? + +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. + +Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: + +- Block handlers: these are run on every new block +- Receipt handlers: run every time a message is executed at a specified account + +[From the NEAR documentation](https://docs.near.org/docs/concepts/transaction#receipt): + +> A Receipt is the only actionable object in the system. When we talk about "processing a transaction" on the NEAR platform, this eventually means "applying receipts" at some point. + +## Building a NEAR Subgraph + +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. + +`@graphprotocol/graph-ts` is a library of subgraph-specific types. + +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. + +> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. + +There are three aspects of subgraph definition: + +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. + +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph#the-graphql-schema). + +**AssemblyScript Mappings:** [AssemblyScript code](/developing/assemblyscript-api) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. 
+ +During subgraph development there are two key commands: + +```bash +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +``` + +### Subgraph Manifest Definition + +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: + +```yaml +specVersion: 0.0.2 +schema: + file: ./src/schema.graphql # link to the schema file +dataSources: + - kind: near + network: near-mainnet + source: + account: app.good-morning.near # This data source will monitor this account + startBlock: 10662188 # Required for NEAR + mapping: + apiVersion: 0.0.5 + language: wasm/assemblyscript + blockHandlers: + - handler: handleNewBlock # the function name in the mapping file + receiptHandlers: + - handler: handleReceipt # the function name in the mapping file + file: ./src/mapping.ts # link to the file with the Assemblyscript mappings +``` + +- NEAR subgraphs introduce a new `kind` of data source (`near`) +- The `network` should correspond to a network on the hosting Graph Node. On the Hosted Service, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` +- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/docs/concepts/account). This can be an account or a sub-account. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. 
If only a list of prefixes or suffixes is necessary the other field can be omitted. + +```yaml +accounts: + prefixes: + - app + - good + suffixes: + - morning.near + - morning.testnet +``` + +NEAR data sources support two types of handlers: + +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. +- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/docs/concepts/account#subaccounts) must be added as independent data sources). + +### Schema Definition + +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph#the-graphql-schema). + +### AssemblyScript Mappings + +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). + +NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/developing/assemblyscript-api). 
+ +```typescript + +class ExecutionOutcome { + gasBurnt: u64, + blockHash: Bytes, + id: Bytes, + logs: Array, + receiptIds: Array, + tokensBurnt: BigInt, + executorId: string, + } + +class ActionReceipt { + predecessorId: string, + receiverId: string, + id: CryptoHash, + signerId: string, + gasPrice: BigInt, + outputDataReceivers: Array, + inputDataIds: Array, + actions: Array, + } + +class BlockHeader { + height: u64, + prevHeight: u64,// Always zero when version < V3 + epochId: Bytes, + nextEpochId: Bytes, + chunksIncluded: u64, + hash: Bytes, + prevHash: Bytes, + timestampNanosec: u64, + randomValue: Bytes, + gasPrice: BigInt, + totalSupply: BigInt, + latestProtocolVersion: u32, + } + +class ChunkHeader { + gasUsed: u64, + gasLimit: u64, + shardId: u64, + chunkHash: Bytes, + prevBlockHash: Bytes, + balanceBurnt: BigInt, + } + +class Block { + author: string, + header: BlockHeader, + chunks: Array, + } + +class ReceiptWithOutcome { + outcome: ExecutionOutcome, + receipt: ActionReceipt, + block: Block, + } +``` + +These types are passed to block & receipt handlers: + +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` + +Otherwise, the rest of the [AssemblyScript API](/developing/assemblyscript-api) is available to NEAR subgraph developers during mapping execution. + +This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/developing/assemblyscript-api#json-api) to allow developers to easily process these logs. + +## Deploying a NEAR Subgraph + +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). 
+ +The Graph's Hosted Service currently supports indexing NEAR mainnet and testnet in beta, with the following network names: + +- `near-mainnet` +- `near-testnet` + +More information on creating and deploying subgraphs on the Hosted Service can be found [here](/deploying/deploying-a-subgraph-to-hosted). + +As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On the Hosted Service, this can be done from [your Dashboard](https://thegraph.com/hosted-service/dashboard): "Add Subgraph". + +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: + +```sh +$ graph create --node subgraph/name # creates a subgraph on a local Graph Node (on the Hosted Service, this is done via the UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +``` + +The node configuration will depend on where the subgraph is being deployed. + +### Hosted Service + +```sh +graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token +``` + +### Local Graph Node (based on default configuration) + +```sh +graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 +``` + +Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: + +```graphql +{ + _meta { + block { + number + } + } +} +``` + +### Indexing NEAR with a Local Graph Node + +Running a Graph Node that indexes NEAR has the following operational requirements: + +- NEAR Indexer Framework with Firehose instrumentation +- NEAR Firehose Component(s) +- Graph Node with Firehose endpoint configured + +We will provide more information on running the above components soon. 
+ +## Querying a NEAR Subgraph + +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api) for more information. + +## Example Subgraphs + +Here are some example subgraphs for reference: + +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) + +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) + +## FAQ + +### How does the beta work? + +NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! + +### Can a subgraph index both NEAR and EVM chains? + +No, a subgraph can only support data sources from one chain/network. + +### Can subgraphs react to more specific triggers? + +Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. + +### Will receipt handlers trigger for accounts and their sub-accounts? + +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: + +```yaml +accounts: + suffixes: + - mintbase1.near +``` + +### Can NEAR subgraphs make view calls to NEAR accounts during mappings? + +This is not supported. We are evaluating whether this functionality is required for indexing. + +### Can I use data source templates in my NEAR subgraph? + +This is not currently supported. 
We are evaluating whether this functionality is required for indexing. + +### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? + +Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. + +### My question hasn't been answered, where can I get more help building NEAR subgraphs? + +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/vtvv7FP) and ask in the #near channel or email near@thegraph.com. + +## References + +- [NEAR developer documentation](https://docs.near.org/docs/develop/basics/getting-started) diff --git a/website/pages/cs/cookbook/quick-start.mdx b/website/pages/cs/cookbook/quick-start.mdx new file mode 100644 index 000000000000..123caf840497 --- /dev/null +++ b/website/pages/cs/cookbook/quick-start.mdx @@ -0,0 +1,164 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the Subgraph Studio [https://thegraph.com/studio/](https://thegraph.com/studio/) and connect your wallet. 
+ +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +```sh +# NPM +$ npm install -g @graphprotocol/graph-cli + +# Yarn +$ yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. +- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. 
+- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. 
The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. 
To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network:
+ +When you deploy a subgraph to a remote Graph node for indexing and it fails at block _X_, the good news is that the Graph node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. + +In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. + +## Please, show me some code! + +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. + +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: + +```tsx +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id.toHex().toString()) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let gravatar = Gravatar.load(event.params.id.toI32().toString()) + if (gravatar == null) { + log.critical('Gravatar not found!', []) + return + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} +``` + +Oops, how unfortunate, when I deploy my perfect looking subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) it fails with the _"Gravatar not found!"_ error. + +The usual way to attempt a fix is: + +1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). +2. 
Re-deploy the subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) (or another remote Graph node).
+3. Wait for it to sync-up.
+4. If it breaks again go back to 1, otherwise: Hooray!
+
+It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._
+
+Using **subgraph forking** we can essentially eliminate this step. Here is how it looks:
+
+0. Spin-up a local Graph node with the **_appropriate fork-base_** set.
+1. Make a change in the mappings source, which you believe will solve the issue.
+2. Deploy to the local Graph node, **_forking the failing subgraph_** and **_starting from the problematic block_**.
+3. If it breaks again, go back to 1, otherwise: Hooray!
+
+Now, you may have 2 questions:
+
+1. fork-base what???
+2. Forking who?!
+
+And I answer:
+
+1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`<fork-base>/<subgraph-id>`) is a valid GraphQL endpoint for the subgraph's store.
+2. Forking is easy, no need to sweat:
+
+```bash
+$ graph deploy <subgraph-name> --debug-fork <subgraph-id> --ipfs http://localhost:5001 --node http://localhost:8020
+```
+
+Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork!
+
+So, here is what I do:
+
+0. I spin-up a local graph node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from the [Hosted Service](https://thegraph.com/hosted-service/).
+
+```
+$ cargo run -p graph-node --release -- \
+    --postgres-url postgresql://USERNAME[:PASSWORD]@localhost:5432/graph-node \
+    --ethereum-rpc NETWORK_NAME:[CAPABILITIES]:URL \
+    --ipfs 127.0.0.1:5001 \
+    --fork-base https://api.thegraph.com/subgraphs/id/
+```
+
+1. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex.
+2. After I made the changes I deploy my subgraph to the local Graph node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`:
+
+```bash
+$ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020
+```
+
+3. I inspect the logs produced by the local Graph node and, Hooray!, everything seems to be working.
+4. I deploy my now bug-free subgraph to a remote Graph node and live happily ever after! (no potatoes tho)
+5. The end... diff --git a/website/pages/cs/cookbook/subgraph-uncrashable.mdx b/website/pages/cs/cookbook/subgraph-uncrashable.mdx new file mode 100644 index 000000000000..989310a3f9a0 --- /dev/null +++ b/website/pages/cs/cookbook/subgraph-uncrashable.mdx @@ -0,0 +1,29 @@ +--- +title: Safe Subgraph Code Generator +--- + +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. + +## Why integrate with Subgraph Uncrashable? + +- **Continuous Uptime**.
Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. + +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. + +- **User Configurable**. Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. + +**Key Features** + +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. + +- The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. + +- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. These logs can be viewed in The Graph's hosted service under the 'Logs' section. + +Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. + +```sh +graph codegen -u [options] [<subgraph-manifest>] +``` + +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs.
diff --git a/website/pages/cs/cookbook/substreams-powered-subgraphs.mdx b/website/pages/cs/cookbook/substreams-powered-subgraphs.mdx new file mode 100644 index 000000000000..8439209fd6f9 --- /dev/null +++ b/website/pages/cs/cookbook/substreams-powered-subgraphs.mdx @@ -0,0 +1,227 @@ +--- +title: Substreams-powered subgraphs +--- + +[Substreams](/substreams/README) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. + +## Requirements + +This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): + +``` +npm install -g @graphprotocol/graph-cli +``` + +## Get the cookbook + +> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). + +``` +graph init --from-example substreams-powered-subgraph +``` + +## Defining a Substreams package + +A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams/README), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. 
+
+The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)):
+
+```proto
+syntax = "proto3";
+
+package example;
+
+message Contracts {
+  repeated Contract contracts = 1;
+}
+
+message Contract {
+  string address = 1;
+  uint64 blockNumber = 2;
+  string timestamp = 3;
+  uint64 ordinal = 4;
+}
+```
+
+The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`:
+
+```
+#[substreams::handlers::map]
+fn map_contract(block: eth::v2::Block) -> Result<Contracts, substreams::errors::Error> {
+    let contracts = block
+        .transactions()
+        .flat_map(|tx| {
+            tx.calls
+                .iter()
+                .filter(|call| !call.state_reverted)
+                .filter(|call| call.call_type == eth::v2::CallType::Create as i32)
+                .map(|call| Contract {
+                    address: format!("0x{}", Hex(&call.address)),
+                    block_number: block.number,
+                    timestamp: block.timestamp_seconds().to_string(),
+                    ordinal: tx.begin_ordinal,
+                })
+        })
+        .collect();
+    Ok(Contracts { contracts })
+}
+```
+
+A Substreams package can be used by a subgraph as long as it has a module which outputs compatible entity changes. The example Substreams package has an additional `graph_out` module in `lib.rs` which returns a `substreams_entity_change::pb::entity::EntityChanges` output, which can be processed by Graph Node.
+
+> The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph.
+
+```
+#[substreams::handlers::map]
+pub fn graph_out(contracts: Contracts) -> Result<EntityChanges, substreams::errors::Error> {
+    // hash map of name to a table
+    let mut tables = Tables::new();
+
+    for contract in contracts.contracts.into_iter() {
+        tables
+            .create_row("Contract", contract.address)
+            .set("timestamp", contract.timestamp)
+            .set("blockNumber", contract.block_number);
+    }
+
+    Ok(tables.to_entity_changes())
+}
+```
+
+These types and modules are pulled together in `substreams.yaml`:
+
+```
+specVersion: v0.1.0
+package:
+  name: 'substreams_test' # the name to be used in the .spkg
+  version: v1.0.1 # the version to use when creating the .spkg
+
+imports: # dependencies
+  entity: https://github.com/streamingfast/substreams-entity-change/releases/download/v0.2.1/substreams-entity-change-v0.2.1.spkg
+
+protobuf: # specifies custom types for use by Substreams modules
+  files:
+    - example.proto
+  importPaths:
+    - ./proto
+
+binaries:
+  default:
+    type: wasm/rust-v1
+    file: ./target/wasm32-unknown-unknown/release/substreams.wasm
+
+modules: # specify modules with their inputs and outputs.
+  - name: map_contract
+    kind: map
+    inputs:
+      - source: sf.ethereum.type.v2.Block
+    output:
+      type: proto:test.Contracts
+
+  - name: graph_out
+    kind: map
+    inputs:
+      - map: map_contract
+    output:
+      type: proto:substreams.entity.v1.EntityChanges # this type can be consumed by Graph Node
+
+```
+
+You can check the overall "flow" from a Block, to `map_contract` to `graph_out` by running `substreams graph`:
+
+```mermaid
+graph TD;
+  map_contract[map: map_contract];
+  sf.ethereum.type.v2.Block[source: sf.ethereum.type.v2.Block] --> map_contract;
+  graph_out[map: graph_out];
+  map_contract --> graph_out;
+```
+
+To prepare this Substreams package for consumption by a subgraph, you must run the following commands:
+
+```bash
+yarn substreams:protogen # generates types in /src/pb
+yarn substreams:build # builds the substreams
+yarn substreams:package # packages the substreams in a .spkg file
+
+# alternatively, yarn substreams:prepare calls all of the above commands
+```
+
+> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands
+
+This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package.
+
+> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date.
+
+## Defining a Substreams-powered subgraph
+
+Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source.
+
+This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `graph_out`, from the Substreams package above).
The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. + +> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). + +```yaml +specVersion: 0.0.4 +description: Ethereum Contract Tracking Subgraph (powered by Substreams) +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: schema.graphql +dataSources: + - kind: substreams + name: substream_test + network: mainnet + source: + package: + moduleName: graph_out + file: substreams-test-v1.0.1.spkg + mapping: + kind: substreams/graph-entities + apiVersion: 0.0.5 +``` + +The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. + +```graphql +type Contract @entity { + id: ID! + + "The timestamp when the contract was deployed" + timestamp: String! + + "The block number of the contract deployment" + blockNumber: BigInt! +} +``` + +Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. + +> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). + +```bash +yarn install # install graph-cli +yarn subgraph:build # build the subgraph +yarn subgraph:deploy # deploy the subgraph +``` + +That's it! You have built and deployed a Substreams-powered subgraph. + +## Serving Substreams-powered subgraphs + +In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. 
These providers can be configured via a `config.toml` file: + +```toml +[chains.mainnet] +shard = "main" +protocol = "ethereum" +provider = [ + { label = "substreams-provider-mainnet", + details = { type = "substreams", + url = "https://mainnet-substreams-url.grpc.substreams.io/", + token = "exampletokenhere" }}, + { label = "firehose-provider-mainnet", + details = { type = "firehose", + url = "https://mainnet-firehose-url.grpc.firehose.io/", + token = "exampletokenhere" }}, +] +``` diff --git a/website/pages/cs/cookbook/upgrading-a-subgraph.mdx b/website/pages/cs/cookbook/upgrading-a-subgraph.mdx new file mode 100644 index 000000000000..90ca820abc45 --- /dev/null +++ b/website/pages/cs/cookbook/upgrading-a-subgraph.mdx @@ -0,0 +1,225 @@ +--- +title: Upgrading an Existing Subgraph to The Graph Network +--- + +## Introduction + +This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! + +The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. + +### Prerequisites + +- You have already deployed a subgraph on the hosted service. +- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- You have a wallet with ETH to publish your subgraph on-chain. +- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. + +## Upgrading an Existing Subgraph to The Graph Network + +> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). + +1. 
Get the latest version of the graph-cli installed:
+
+```sh
+npm install -g @graphprotocol/graph-cli
+```
+
+```sh
+yarn global add @graphprotocol/graph-cli
+```
+
+Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater.
+
+2. Inside the subgraph's main project repository, authenticate the subgraph to deploy and build on the studio:
+
+```sh
+graph auth --studio <DEPLOY_KEY>
+```
+
+3. Generate files and build the subgraph:
+
+```sh
+graph codegen && graph build
+```
+
+If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/).
+
+4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. You can find your `<SUBGRAPH_SLUG>` in the Studio UI, which is based on the name of your subgraph.
+
+```sh
+graph deploy --studio <SUBGRAPH_SLUG>
+```
+
+5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground):
+
+```graphql
+{
+  users(first: 5) {
+    id
+    liquidityPositions {
+      id
+    }
+  }
+  bundles(first: 5) {
+    id
+    ethPrice
+  }
+}
+```
+
+6. At this point, your subgraph is now deployed on Subgraph Studio, but not yet published to the decentralized network. You can now test the subgraph to make sure it is working as intended using the temporary query URL as seen on top of the right column above. As this name already suggests, this is a temporary URL and should not be used in production.
+
+- Updating is just publishing another version of your existing subgraph on-chain.
+- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei.
+- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. + +7. Publish the subgraph on The Graph's decentralized network by hitting the "Publish" button. + +You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. + +And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). + +Feel free to leverage the [#Curators channel](https://discord.gg/rC8rBuRtbH) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. + +### Create an API key + +You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). + +![API key creation page](/img/api-image.png) + +At the end of each week, an invoice will be generated based on the query fees that have been incurred during this period. This invoice will be paid automatically using the GRT available in your balance. Your balance will be updated after the cost of your query fees are withdrawn. Query fees are paid in GRT via the Arbitrum network. You will need to add GRT to the Arbitrum billing contract to enable your API key via the following steps: + +- Purchase GRT on an exchange of your choice. +- Send the GRT to your wallet. 
+- On the Billing page in Studio, click on Add GRT. + +![Add GRT in billing](/img/Add-GRT-New-Page.png) + +- Follow the steps to add your GRT to your billing balance. +- Your GRT will be automatically bridged to the Arbitrum network and added to your billing balance. + +![Billing pane](/img/New-Billing-Pane.png) + +> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. + +### Securing your API key + +It is recommended that you secure the API by limiting its usage in two ways: + +1. Authorized Subgraphs +2. Authorized Domain + +You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). + +![Subgraph lockdown page](/img/subgraph-lockdown.png) + +### Querying your subgraph on the decentralized network + +Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. + +![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) + +As soon as the first Indexer has fully indexed your subgraph you can start to query the subgraph on the decentralized network. In order to retrieve the query URL for your subgraph, you can copy/paste it by clicking on the symbol next to the query URL. You will see something like this: + +`https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` + +Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. + +You can now use that Query URL in your dapp to send your GraphQL requests to. + +Congratulations! You are now a pioneer of decentralization! 
+ +> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: + +```graphql +{ + stakers(block: { number_gte: 14486109 }) { + id + } +} +``` + +More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). + +## Updating a Subgraph on the Network + +If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. + +1. Make changes to your current subgraph. A good idea is to test small fixes on the Subgraph Studio by publishing to Goerli. +2. Deploy the following and specify the new version in the command (eg. v0.0.1, v0.0.2, etc): + +```sh +graph deploy --studio +``` + +3. Test the new version in the Subgraph Studio by querying in the playground +4. Publish the new version on The Graph Network. Remember that this requires gas (as described in the section above). + +### Owner Update Fee: Deep Dive + +> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). + +An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). + +The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. 
This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. + +Let's make an example, this is only the case if your subgraph is being actively curated on: + +- 100,000 GRT is signaled using auto-migrate on v1 of a subgraph +- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned +- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. + +_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ + +### Maintaining a Stable Version of a Subgraph + +If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/rC8rBuRtbH) on Discord to let Indexers know when you're versioning your subgraphs. + +Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. 
+ +### Updating the Metadata of a Subgraph + +You can update the metadata of your subgraphs without having to publish a new version. The metadata includes the subgraph name, image, description, website URL, source code URL, and categories. Developers can do this by updating their subgraph details in the Subgraph Studio where you can edit all applicable fields. + +Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. + +## Best Practices for Deploying a Subgraph to The Graph Network + +1. Leveraging an ENS name for Subgraph Development: + +- Set up your ENS [here](https://app.ens.domains/) +- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). + +2. The more filled out your profiles are, the better the chances for your subgraphs to be indexed and curated. + +## Deprecating a Subgraph on The Graph Network + +Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. + +## Querying a Subgraph + Billing on The Graph Network + +The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. + +In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). + +### Estimate Query Fees on the Network + +While this is not a live feature in the product UI, you can set your maximum budget per query by taking the amount you're willing to pay per month and dividing it by your expected query volume. + +While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. 
If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. + +Remember that it's a dynamic and growing market, but how you interact with it is in your control. There is no maximum or minimum price specified in the protocol or the Gateways. For example, you can look at the price paid by a few of the dapps on the network (on a per-week basis), below. See the last column, which shows query fees in GRT. + +![QueryFee](/img/QueryFee.png) + +## Additional Resources + +If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: + + + +- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) +- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around + - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` +- [Subgraph Studio documentation](/deploying/subgraph-studio) diff --git a/website/pages/cs/deploying/_meta.js b/website/pages/cs/deploying/_meta.js new file mode 100644 index 000000000000..3d7abedc4d57 --- /dev/null +++ b/website/pages/cs/deploying/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/deploying/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx new file mode 100644 index 000000000000..621f6321a0d4 --- /dev/null +++ b/website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx @@ -0,0 +1,291 @@ +--- +title: Deploying a 
Subgraph to the Hosted Service
+---
+
+> If a network is not supported on the Hosted Service, you can run your own [graph-node](https://github.com/graphprotocol/graph-node) to index it.
+
+This page explains how to deploy a subgraph to the Hosted Service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph).
+
+## Create a Hosted Service account
+
+Before using the Hosted Service, create an account in our Hosted Service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [Hosted Service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow.
+
+## Store the Access Token
+
+After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service <ACCESS_TOKEN>`. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token.
+
+## Create a Subgraph on the Hosted Service
+
+Before deploying the subgraph, you need to create it in The Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _'Add Subgraph'_ button and fill in the information below as appropriate:
+
+**Image** - Select an image to be used as a preview image and thumbnail for the subgraph.
+
+**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._
+
+**Account** - The account that the subgraph is created under. This can be the account of an individual or organization.
_Subgraphs cannot be moved between accounts later._ + +**Subtitle** - Text that will appear in subgraph cards. + +**Description** - Description of the subgraph, visible on the subgraph details page. + +**GitHub URL** - Link to the subgraph repository on GitHub. + +**Hide** - Switching this on hides the subgraph in the Graph Explorer. + +After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Defining a Subgraph section](/developing/defining-a-subgraph). + +## Deploy a Subgraph on the Hosted Service + +Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell the Graph Explorer to start indexing your subgraph using these files. + +You deploy the subgraph by running `yarn deploy` + +After deploying the subgraph, the Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. + +The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. + +## Redeploying a Subgraph + +When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. + +If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. 
If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. + +## Deploying the subgraph to multiple networks + +In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. + +### Using graph-cli + +Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: + +```sh +Options: + + ... + --network Network configuration to use from the networks config file + --network-file Networks config file path (default: "./networks.json") +``` + +You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. + +**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. + +If you don't have a `networks.json` file, you'll need to manually create one with the following structure: + +```json +{ + "network1": { // the network name + "dataSource1": { // the dataSource name + "address": "0xabc...", // the contract address (optional) + "startBlock": 123456 // the startBlock (optional) + }, + "dataSource2": { + "address": "0x123...", + "startBlock": 123444 + } + }, + "network2": { + "dataSource1": { + "address": "0x987...", + "startBlock": 123 + }, + "dataSource2": { + "address": "0xxyz..", + "startBlock": 456 + } + }, + ... +} +``` + +**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. 
If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. + +Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `goerli` networks, and this is your `subgraph.yaml`: + +```yaml +# ... +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x123...' + abi: Gravity + mapping: + kind: ethereum/events +``` + +This is what your networks config file should look like: + +```json +{ + "mainnet": { + "Gravity": { + "address": "0x123..." + } + }, + "goerli": { + "Gravity": { + "address": "0xabc..." + } + } +} +``` + +Now we can run one of the following commands: + +```sh +# Using default networks.json file +yarn build --network goerli + +# Using custom named file +yarn build --network goerli --network-file path/to/config +``` + +The `build` command will update your `subgraph.yaml` with the `goerli` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: + +```yaml +# ... +dataSources: + - kind: ethereum/contract + name: Gravity + network: goerli + source: + address: '0xabc...' + abi: Gravity + mapping: + kind: ethereum/events +``` + +Now you are ready to `yarn deploy`. + +**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: + +```sh +# Using default networks.json file +yarn deploy --network goerli + +# Using custom named file +yarn deploy --network goerli --network-file path/to/config +``` + +### Using subgraph.yaml template + +One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). 
+ +To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Goerli using different contract addresses. You could then define two config files providing the addresses for each network: + +```json +{ + "network": "mainnet", + "address": "0x123..." +} +``` + +and + +```json +{ + "network": "goerli", + "address": "0xabc..." +} +``` + +Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: + +```yaml +# ... +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + network: {{network}} + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + address: '{{address}}' + abi: Gravity + mapping: + kind: ethereum/events +``` + +In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: + +```json +{ + ... + "scripts": { + ... + "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", + "prepare:goerli": "mustache config/goerli.json subgraph.template.yaml > subgraph.yaml" + }, + "devDependencies": { + ... + "mustache": "^3.1.0" + } +} +``` + +To deploy this subgraph for mainnet or Goerli you would now simply run one of the two following commands: + +```sh +# Mainnet: +yarn prepare:mainnet && yarn deploy + +# Goerli: +yarn prepare:goerli && yarn deploy +``` + +A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). + +**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. + +## Checking subgraph health + +If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. 
However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. + +Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the Hosted Service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: + +```graphql +{ + indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { + synced + health + fatalError { + message + block { + number + hash + } + handler + } + chains { + chainHeadBlock { + number + } + latestBlock { + number + } + } + } +} +``` + +This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. + +## Hosted service subgraph archive policy + +The Hosted Service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. + +To improve the performance of the service for active subgraphs, the Hosted Service will archive subgraphs that are inactive. 
+ +**A subgraph is defined as "inactive" if it was deployed to the Hosted Service more than 45 days ago, and if it has received 0 queries in the last 45 days.** + +Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's Hosted Service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. + +## Subgraph Studio subgraph archive policy + +When a new version of a subgraph is deployed, the previous version is archived (deleted from the graph-node DB). This only happens if the previous version is not published to The Graph's decentralized network. + +When a subgraph version isn’t queried for over 45 days, that version is archived. + +Every subgraph affected with this policy has an option to bring the version in question back. diff --git a/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx new file mode 100644 index 000000000000..8cfa32b036f0 --- /dev/null +++ b/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx @@ -0,0 +1,70 @@ +--- +title: Deploying a Subgraph to the Subgraph Studio +--- + +> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. + +These are the steps to deploy your subgraph to the Subgraph Studio: + +- Install The Graph CLI (with either yarn or npm) +- Create your Subgraph in the Subgraph Studio +- Authenticate your account from the CLI +- Deploying a Subgraph to the Subgraph Studio + +## Installing Graph CLI + +We are using the same CLI to deploy subgraphs to our [hosted service](https://thegraph.com/hosted-service/) and to the [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install graph-cli. This can be done using npm or yarn. 
+ +**Install with yarn:** + +```bash +yarn global add @graphprotocol/graph-cli +``` + +**Install with npm:** + +```bash +npm install -g @graphprotocol/graph-cli +``` + +## Create your Subgraph in Subgraph Studio + +Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. + +## Initialize your Subgraph + +Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: + +```bash +graph init --studio <SUBGRAPH_SLUG> +``` + +The `<SUBGRAPH_SLUG>` value can be found on your subgraph details page in Subgraph Studio: + +![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) + +After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. + +## Graph Auth + +Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. + +Here is the command that you need to use to authenticate from the CLI: + +```bash +graph auth --studio <DEPLOY KEY> +``` + +## Deploying a Subgraph to Subgraph Studio + +Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. + +Here is the CLI command that you need to use to deploy your subgraph. 
+ +```bash +graph deploy --studio <SUBGRAPH_SLUG> +``` + +After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. + +Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. diff --git a/website/pages/cs/deploying/hosted-service.mdx b/website/pages/cs/deploying/hosted-service.mdx new file mode 100644 index 000000000000..2e6093531110 --- /dev/null +++ b/website/pages/cs/deploying/hosted-service.mdx @@ -0,0 +1,51 @@ +--- +title: What is the Hosted Service? +--- + +> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). + +This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). + +If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. 
+ +For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). + +## Create a Subgraph + +First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` + +### From an Existing Contract + +If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. + +You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from [Etherscan](https://etherscan.io/). + +```sh +graph init \ + --product hosted-service + --from-contract <CONTRACT_ADDRESS> \ + <GITHUB_USER>/<SUBGRAPH NAME> [<DIRECTORY>] +``` + +Additionally, you can use the following optional arguments. If the ABI cannot be fetched from Etherscan, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. + +```sh +--network <ETHEREUM_NETWORK> \ +--abi <FILE> \ +``` + +The `<GITHUB_USER>` in this case is your GitHub user or organization name, `<SUBGRAPH_NAME>` is the name for your subgraph, and `<DIRECTORY>` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `<CONTRACT_ADDRESS>` is the address of your existing contract. `<ETHEREUM_NETWORK>` is the name of the network that the contract lives on. `<FILE>` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** + +### From an Example Subgraph + +The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: + +``` +graph init --from-example --product hosted-service <GITHUB_USER>/<SUBGRAPH NAME> [<DIRECTORY>] +``` + +The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. 
The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. + +## Supported Networks on the hosted service + +You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/cs/deploying/subgraph-studio-faqs.mdx b/website/pages/cs/deploying/subgraph-studio-faqs.mdx new file mode 100644 index 000000000000..65217d4b7741 --- /dev/null +++ b/website/pages/cs/deploying/subgraph-studio-faqs.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to the Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Mainnet can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. 
How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of The Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in the Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. diff --git a/website/pages/cs/deploying/subgraph-studio.mdx b/website/pages/cs/deploying/subgraph-studio.mdx new file mode 100644 index 000000000000..1406065463d4 --- /dev/null +++ b/website/pages/cs/deploying/subgraph-studio.mdx @@ -0,0 +1,95 @@ +--- +title: How to Use the Subgraph Studio +--- + +Welcome to your new launchpad 👩🏽‍🚀 + +The Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). + +What you can do in the Subgraph Studio: + +- Create a subgraph through the Studio UI +- Deploy a subgraph using the CLI +- Publish a subgraph with the Studio UI +- Test it in the playground +- Integrate it in staging using the query URL +- Create and manage your API keys for specific subgraphs + +Here in the Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. + +Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. 
The Studio is comprised of 5 main parts: + +- Your user account controls +- A list of subgraphs that you’ve created +- A section to manage, view details and visualize the status of a specific subgraph +- A section to manage your API keys that you will need to query a subgraph +- A section to manage your billing + +## How to Create Your Account + +1. Sign in with your wallet - you can do this via MetaMask or WalletConnect +1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. + +## How to Create your Subgraph in Subgraph Studio + +The best part! When you first create a subgraph, you’ll be directed to fill out: + +- Your Subgraph Name +- Image +- Description +- Categories (e.g. `DeFi`, `NFTs`, `Governance`) +- Website + +## Subgraph Compatibility with The Graph Network + +The Graph Network is not yet able to support all of the data-sources & features available on the Hosted Service. In order to be supported by Indexers on the network, subgraphs must: + +- Index a [supported network](/developing/supported-networks) +- Must not use any of the following features: + - ipfs.cat & ipfs.map + - Non-fatal errors + - Grafting + +More features & networks will be added to The Graph Network incrementally. + +### Subgraph lifecycle flow + +![Subgraph Lifecycle](/img/subgraph-lifecycle.png) + +After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. 
For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (pst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. + +## Testing your Subgraph in Subgraph Studio + +If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. + +## Publish your Subgraph in Subgraph Studio + +You’ve made it this far - congrats! + +In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [blog](https://thegraph.com/blog/building-with-subgraph-studio). + +Check out the video overview below as well: + + + +Remember, while you’re going through your publishing flow, you’ll be able to push to either mainnet or Goerli. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Goerli, which is free to do. This will allow you to see how the subgraph will work in The Graph Explorer and will allow you to test curation elements. + +Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! + +Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. 
By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. + +**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! + +## Versioning your Subgraph with the CLI + +Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to The Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. + +Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in The Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. + +Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). 
+ +### Automatic Archiving of Subgraph Versions + +Whenever you deploy a new subgraph version in the Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. + +![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/cs/developing/_meta.js b/website/pages/cs/developing/_meta.js new file mode 100644 index 000000000000..48d6b89bb3fe --- /dev/null +++ b/website/pages/cs/developing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/developing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/developing/assemblyscript-api.mdx b/website/pages/cs/developing/assemblyscript-api.mdx new file mode 100644 index 000000000000..35637a39f401 --- /dev/null +++ b/website/pages/cs/developing/assemblyscript-api.mdx @@ -0,0 +1,816 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. 
+ +## Installation + +Subgraphs created with [`graph init`](/developing/creating-a-subgraph) come with preconfigured dependencies. All that is required to install these dependencies is to run one of the following commands: + +```sh +yarn install # Yarn +npm install # NPM +``` + +If the subgraph was created from scratch, one of the following two commands will install the Graph TypeScript library as a dependency: + +```sh +yarn add --dev @graphprotocol/graph-ts # Yarn +npm install --save-dev @graphprotocol/graph-ts # NPM +``` + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. The current mapping API version is 0.0.6. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. 
This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. 
If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. +- `x.toString(): string` – turns `BigInt` into a decimal number string. +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. 
See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. + +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. 
The string `hex` can optionally start with `0x`
+- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes
+
+_Type conversions_
+
+- `b.toHex()` – returns a hexadecimal string representing the bytes in the array
+- `b.toString()` – converts the bytes in the array to a string of unicode characters
+- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes)
+
+_Operators_
+
+- `b.concat(other: Bytes) : Bytes` - return new `Bytes` consisting of `this` directly followed by `other`
+- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly followed by the byte representation of `other`
+
+#### Address
+
+```typescript
+import { Address } from '@graphprotocol/graph-ts'
+```
+
+`Address` extends `Bytes` to represent Ethereum `address` values.
+
+It adds the following method on top of the `Bytes` API:
+
+- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string
+- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error
+
+### Store API
+
+```typescript
+import { store } from '@graphprotocol/graph-ts'
+```
+
+The `store` API allows to load, save and remove entities from and to the Graph Node store.
+
+Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities.
+
+#### Creating entities
+
+The following is a common pattern for creating entities from Ethereum events. 
+ +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. 
+
+> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities.
+
+#### Looking up entities created within a block
+
+As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types.
+
+The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time.
+
+```typescript
+let id = event.transaction.hash // or however the ID is constructed
+let transfer = Transfer.loadInBlock(id)
+if (transfer == null) {
+  transfer = new Transfer(id)
+}
+
+// Use the Transfer entity as before
+```
+
+> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store.
+
+#### Looking up derived entities
+
+As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available.
+
+This enables loading derived entity fields from within an event handler. For example, given the following schema:
+
+```graphql
+type Token @entity {
+  id: ID!
+  holder: Holder!
+  color: String
+}
+
+type Holder @entity {
+  id: ID!
+  tokens: [Token!]! 
@derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entity that the Holder entity was derived from +let token = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... +``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. 
Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. 
This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. + +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. 
+ +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. 
+ +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. 
+ +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { JSONValue, Value } from '@graphprotocol/graph-ts'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let newItem = new Item(id)
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Set parent to "parentId"
+  newItem.save()
+}
+
+// Put this inside an event handler in the mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternatively, use `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited.
+
+On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed.
+
+### Crypto API
+
+```typescript
+import { crypto } from '@graphprotocol/graph-ts'
+```
+
+The `crypto` API makes cryptographic functions available for use in mappings. 
Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... 
+} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | 
ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### Common AssemblyScript Issues + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
diff --git a/website/pages/cs/developing/creating-a-subgraph.mdx b/website/pages/cs/developing/creating-a-subgraph.mdx new file mode 100644 index 000000000000..1fc288833c35 --- /dev/null +++ b/website/pages/cs/developing/creating-a-subgraph.mdx @@ -0,0 +1,1178 @@ +--- +title: Creating a Subgraph +--- + +A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. + +![Defining a Subgraph](/img/defining-a-subgraph.png) + +The subgraph definition consists of a few files: + +- `subgraph.yaml`: a YAML file containing the subgraph manifest + +- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL + +- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) + +> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). + +Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph. + +## Install the Graph CLI + +The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. 
+ +Once you have `yarn`, install the Graph CLI by running + +**Install with yarn:** + +```bash +yarn global add @graphprotocol/graph-cli +``` + +**Install with npm:** + +```bash +npm install -g @graphprotocol/graph-cli +``` + +Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. + +## From An Existing Contract + +The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. + +```sh +graph init \ + --product subgraph-studio + --from-contract \ + [--network ] \ + [--abi ] \ + [] +``` + +The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. + +## From An Example Subgraph + +The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: + +```sh +graph init --studio +``` + +The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. + +## Add New dataSources To An Existing Subgraph + +Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. 
+ +```sh +graph add <address>
[<subgraph-path>] + +Options: + + --abi Path to the contract ABI (default: download from Etherscan) + --contract-name Name of the contract (default: Contract) + --merge-entities Whether to merge entities with the same name (default: false) + --network-file Networks config file path (default: "./networks.json") +``` + +The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. + +The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: + +- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. +- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. + +The contract `address` will be written to the `networks.json` for the relevant network. + +> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. + +## The Subgraph Manifest + +The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
+ +For the example subgraph, `subgraph.yaml` is: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + abi: Gravity + startBlock: 6175244 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Gravity + file: ./abis/Gravity.json + eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + - event: UpdatedGravatar(uint256,address,string,string) + handler: handleUpdatedGravatar + callHandlers: + - function: createGravatar(string,string) + handler: handleCreateGravatar + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCall + filter: + kind: call + file: ./src/mapping.ts +``` + +The important entries to update for the manifest are: + +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. + +- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. + +- `features`: a list of all used [feature](#experimental-features) names. + +- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. + +- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. + +- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. 
+ +- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. + +- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. + +- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. + +- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. + +A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. + +The triggers for a data source within a block are ordered using the following process: + +1. Event and call triggers are first ordered by transaction index within the block. +2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. +3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. + +These ordering rules are subject to change. + +### Getting The ABIs + +The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: + +- If you are building your own project, you will likely have access to your most current ABIs. 
+- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. + +## The GraphQL Schema + +The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. + +## Defining Entities + +Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. + +With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. 
Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. + +### Good Example + +The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. + +```graphql +type Gravatar @entity(immutable: true) { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String + accepted: Boolean +} +``` + +### Bad Example + +The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. + +```graphql +type GravatarAccepted @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} + +type GravatarDeclined @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} +``` + +### Optional and Required Fields + +Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: + +``` +Null value resolved for non-null field 'name' +``` + +Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. 
+ +For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. + +### Built-In Scalar Types + +#### GraphQL Supported Scalars + +We support the following scalars in our GraphQL API: + +| Type | Description | +| --- | --- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | + +#### Enums + +You can also create enums within a schema. Enums have the following syntax: + +```graphql +enum TokenStatus { + OriginalOwner + SecondOwner + ThirdOwner +} +``` + +Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. 
The example below demonstrates what the Token entity would look like with an enum field: + +More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). + +#### Entity Relationships + +An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. + +Relationships are defined on entities just like any other field except that the type specified is that of another entity. + +#### One-To-One Relationships + +Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: + +```graphql +type Transaction @entity(immutable: true) { + id: Bytes! + transactionReceipt: TransactionReceipt +} + +type TransactionReceipt @entity(immutable: true) { + id: Bytes! + transaction: Transaction +} +``` + +#### One-To-Many Relationships + +Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +#### Reverse Lookups + +Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. + +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. 
Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. + +#### Example + +We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! + tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +#### Many-To-Many Relationships + +For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. + +#### Example + +Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [User!]! +} + +type User @entity { + id: Bytes! + name: String! + organizations: [Organization!]! @derivedFrom(field: "members") +} +``` + +A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [UserOrganization!]! @derivedFrom(field: "organization") +} + +type User @entity { + id: Bytes! + name: String! + organizations: [UserOrganization!] 
@derivedFrom(field: "user") +} + +type UserOrganization @entity { + id: Bytes! # Set to `user.id.concat(organization.id)` + user: User! + organization: Organization! +} +``` + +This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: + +```graphql +query usersWithOrganizations { + users { + organizations { + # this is a UserOrganization entity + organization { + name + } + } + } +} +``` + +This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. + +#### Adding comments to the schema + +As per GraphQL spec, comments can be added above schema entity attributes using double quotations `""`. This is illustrated in the example below: + +```graphql +type MyFirstEntity @entity { + "unique identifier and primary key of the entity" + id: Bytes! + address: Bytes! +} +``` + +## Defining Fulltext Search Fields + +Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. + +A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. + +To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. + +```graphql +type _Schema_ + @fulltext( + name: "bandSearch" + language: en + algorithm: rank + include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] + ) + +type Band @entity { + id: Bytes! + name: String! + description: String! + bio: String + wallet: Address + labels: [Label!]! 
+ discography: [Album!]! + members: [Musician!]! +} +``` + +The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. + +```graphql +query { + bandSearch(text: "breaks & electro & detroit") { + id + name + description + wallet + } +} +``` + +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. + +### Languages supported + +Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". + +Supported language dictionaries: + +| Code | Dictionary | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portuguese | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | + +### Ranking Algorithms + +Supported algorithms for ordering results: + +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | Use the match quality (0-1) of the fulltext query to order the results. | +| proximityRank | Similar to rank but also includes the proximity of the matches. 
| + +## Writing Mappings + +The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. + +For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. + +In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: + +```javascript +import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' +import { Gravatar } from '../generated/schema' + +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let id = event.params.id + let gravatar = Gravatar.load(id) + if (gravatar == null) { + gravatar = new Gravatar(id) + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} +``` + +The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. 
+ +The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. + +### Recommended IDs for Creating New Entities + +Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. + +- `event.params.id.toHex()` +- `event.transaction.from.toHex()` +- `event.transaction.hash.toHex() + "-" + event.logIndex.toString()` + +We provide the [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. You can use this library in your mappings by importing `@graphprotocol/graph-ts` in `mapping.ts`. + +## Code Generation + +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. + +This is done with + +```sh +graph codegen [--output-dir <OUTPUT_DIR>] [<WORKSPACE_OR_SUBGRAPH_MANIFEST>] +``` + +but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: + +```sh +# Yarn +yarn codegen + +# NPM +npm run codegen +``` + +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. 
All of these types are written to `<OUTPUT_DIR>/<DATA_SOURCE_NAME>/<ABI_NAME>.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with: + +```javascript +import { + // The contract class: + Gravity, + // The events classes: + NewGravatar, + UpdatedGravatar, +} from '../generated/Gravity/Gravity' +``` + +In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `<OUTPUT_DIR>/schema.ts`, allowing mappings to import them with + +```javascript +import { Gravatar } from '../generated/schema' +``` + +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. + +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. + +## Data Source Templates + +A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. + +The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. + +### Data Source for the Main Contract + +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. 
Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. + +```yaml +dataSources: + - kind: ethereum/contract + name: Factory + network: mainnet + source: + address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + abi: Factory + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/factory.ts + entities: + - Directory + abis: + - name: Factory + file: ./abis/factory.json + eventHandlers: + - event: NewExchange(address,address) + handler: handleNewExchange +``` + +### Data Source Templates for Dynamically Created Contracts + +Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. + +```yaml +dataSources: + - kind: ethereum/contract + name: Factory + # ... other source fields for the main contract ... +templates: + - name: Exchange + kind: ethereum/contract + network: mainnet + source: + abi: Exchange + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/exchange.ts + entities: + - Exchange + abis: + - name: Exchange + file: ./abis/exchange.json + eventHandlers: + - event: TokenPurchase(address,uint256,uint256) + handler: handleTokenPurchase + - event: EthPurchase(address,uint256,uint256) + handler: handleEthPurchase + - event: AddLiquidity(address,uint256,uint256) + handler: handleAddLiquidity + - event: RemoveLiquidity(address,uint256,uint256) + handler: handleRemoveLiquidity +``` + +### Instantiating a Data Source Template + +In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. 
In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. + +```typescript +import { Exchange } from '../generated/templates' + +export function handleNewExchange(event: NewExchange): void { + // Start indexing the exchange; `event.params.exchange` is the + // address of the new exchange contract + Exchange.create(event.params.exchange) +} +``` + +> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> +> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. + +### Data Source Context + +Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: + +```typescript +import { Exchange } from '../generated/templates' + +export function handleNewExchange(event: NewExchange): void { + let context = new DataSourceContext() + context.setString('tradingPair', event.params.tradingPair) + Exchange.createWithContext(event.params.exchange, context) +} +``` + +Inside a mapping of the `Exchange` template, the context can then be accessed: + +```typescript +import { dataSource } from '@graphprotocol/graph-ts' + +let context = dataSource.context() +let tradingPair = context.getString('tradingPair') +``` + +There are setters and getters like `setString` and `getString` for all value types. 
+ +## Start Blocks + +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. + +```yaml +dataSources: + - kind: ethereum/contract + name: ExampleSource + network: mainnet + source: + address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + abi: ExampleContract + startBlock: 6627917 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/factory.ts + entities: + - User + abis: + - name: ExampleContract + file: ./abis/ExampleContract.json + eventHandlers: + - event: NewEvent(address,address) + handler: handleNewEvent +``` + +> **Note:** The contract creation block can be quickly looked up on Etherscan: +> +> 1. Search for the contract by entering its address in the search bar. +> 2. Click on the creation transaction hash in the `Contract Creator` section. +> 3. Load the transaction details page where you'll find the start block for that contract. + +## Call Handlers + +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. 
+ +Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. + +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. + +### Defining a Call Handler + +To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. + +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + callHandlers: + - function: createGravatar(string,string) + handler: handleCreateGravatar +``` + +The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. + +### Mapping Function + +Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument:
+
+```typescript
+import { CreateGravatarCall } from '../generated/Gravity/Gravity'
+import { Transaction } from '../generated/schema'
+
+export function handleCreateGravatar(call: CreateGravatarCall): void {
+  let id = call.transaction.hash
+  let transaction = new Transaction(id)
+  transaction.displayName = call.inputs._displayName
+  transaction.imageUrl = call.inputs._imageUrl
+  transaction.save()
+}
+```
+
+The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`.
+
+## Block Handlers
+
+In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter.
+
+### Supported Filters
+
+```yaml
+filter:
+  kind: call
+```
+
+_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._
+
+> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing.
+
+The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. 
+ +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: dev + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCallToContract + filter: + kind: call +``` + +### Mapping Function + +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. + +```typescript +import { ethereum } from '@graphprotocol/graph-ts' + +export function handleBlock(block: ethereum.Block): void { + let id = block.hash + let entity = new Block(id) + entity.save() +} +``` + +## Anonymous Events + +If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: + +```yaml +eventHandlers: + - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) + topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' + handler: handleGive +``` + +An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. + +## Transaction Receipts in Event Handlers + +Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. + +To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. + +```yaml +eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + receipt: true +``` + +Inside the handler function, the receipt can be accessed in the `Event.receipt` field. 
When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. + +## Experimental features + +Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: + +| Feature | Name | +| --------------------------------------------------------- | --------------------------------------------------- | +| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | +| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | +| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | + +For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - fullTextSearch + - nonFatalErrors +dataSources: ... +``` + +Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. + +### IPFS on Ethereum Contracts + +A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts. + +Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing. + +> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio. 
+ +> **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose. + +When running a local Graph Node, the `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` environment variable must be set in order to index subgraphs using this experimental functionality. + +### Non-fatal errors + +Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. + +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. + +Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - nonFatalErrors + ... +``` + +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example:
+
+```graphql
+foos(first: 100, subgraphError: allow) {
+  id
+}
+
+_meta {
+  hasIndexingErrors
+}
+```
+
+If the subgraph encounters an error, that query will return both the data and a GraphQL error with the message `"indexing_error"`, as in this example response:
+
+```graphql
+"data": {
+  "foos": [
+    {
+      "id": "0xdead"
+    }
+  ],
+  "_meta": {
+    "hasIndexingErrors": true
+  }
+},
+"errors": [
+  {
+    "message": "indexing_error"
+  }
+]
+```
+
+### Grafting onto Existing Subgraphs
+
+When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source). In some circumstances, it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed.
+
+A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level:
+
+```yaml
+description: ...
+graft:
+  base: Qm... # Subgraph ID of base subgraph
+  block: 7345624 # Block number
+```
+
+When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. 
+
+Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.
+
+The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways:
+
+- It adds or removes entity types
+- It removes attributes from entity types
+- It adds nullable attributes to entity types
+- It turns non-nullable attributes into nullable attributes
+- It adds values to enums
+- It adds or removes interfaces
+- It changes for which entity types an interface is implemented
+
+> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest.
+
+## File Data Sources
+
+File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS.
+
+> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data.
+
+### Overview
+
+Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found.
+
+This is similar to the [existing data source templates](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. 
+ +> This replaces the existing `ipfs.cat` API + +### Upgrade guide + +#### Update `graph-ts` and `graph-cli` + +File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 + +#### Add a new entity type which will be updated when files are found + +File data sources cannot access or update chain-based entities, but must update file specific entities. + +This may mean splitting out fields from existing entities into separate entities, linked together. + +Original combined entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + externalURL: String! + ipfsURI: String! + image: String! + name: String! + description: String! + type: String! + updatedAtTimestamp: BigInt + owner: User! +} +``` + +New, split entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + ipfsURI: TokenMetadata + updatedAtTimestamp: BigInt + owner: String! +} + +type TokenMetadata @entity { + id: ID! + image: String! + externalURL: String! + name: String! + description: String! +} +``` + +If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! + +> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. + +#### Add a new templated data source with `kind: file/ipfs` + +This is the data source which will be spawned when a file of interest is identified. 
+ +```yaml +templates: + - name: TokenMetadata + kind: file/ipfs + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + file: ./src/mapping.ts + handler: handleMetadata + entities: + - TokenMetadata + abis: + - name: Token + file: ./abis/Token.json +``` + +> Currently `abis` are required, though it is not possible to call contracts from within file data sources + +The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#Limitations) for more details. + +#### Create a new handler to process files + +This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)). + +The CID of the file as a readable string can be accessed via the `dataSource` as follows: + +```typescript +const cid = dataSource.stringParam() +``` + +Example handler: + +```typescript +import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' +import { TokenMetadata } from '../generated/schema' + +export function handleMetadata(content: Bytes): void { + let tokenMetadata = new TokenMetadata(dataSource.stringParam()) + const value = json.fromBytes(content).toObject() + if (value) { + const image = value.get('image') + const name = value.get('name') + const description = value.get('description') + const externalURL = value.get('external_url') + + if (name && image && description && externalURL) { + tokenMetadata.name = name.toString() + tokenMetadata.image = image.toString() + tokenMetadata.externalURL = externalURL.toString() + tokenMetadata.description = description.toString() + } + + tokenMetadata.save() + } +} +``` + +#### Spawn file data sources when required + +You can now create file data sources during execution of chain-based handlers: + +- Import the template from 
the auto-generated `templates`
+- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier
+
+> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`)
+
+Example:
+
+```typescript
+import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'
+
+const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm'
+//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs.
+
+export function handleTransfer(event: TransferEvent): void {
+  let token = Token.load(event.params.tokenId.toString())
+  if (!token) {
+    token = new Token(event.params.tokenId.toString())
+    token.tokenID = event.params.tokenId
+
+    token.tokenURI = '/' + event.params.tokenId.toString() + '.json'
+    const tokenIpfsHash = ipfshash + token.tokenURI
+    //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json"
+
+    token.ipfsURI = tokenIpfsHash
+
+    TokenMetadataTemplate.create(tokenIpfsHash)
+  }
+
+  token.updatedAtTimestamp = event.block.timestamp
+  token.owner = event.params.to.toHexString()
+  token.save()
+}
+```
+
+This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed.
+
+This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity.
+
+> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file
+
+Congratulations, you are using file data sources!
+
+#### Deploying your subgraphs
+
+You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
+ +#### Limitations + +File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: + +- Entities created by File Data Sources are immutable, and cannot be updated +- File Data Source handlers cannot access entities from other file data sources +- Entities associated with File Data Sources cannot be accessed by chain-based handlers + +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! + +Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. + +#### Best practices + +If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. + +You can use [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. + +If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. + +> We are working to improve the above recommendation, so queries only return the "most recent" version + +#### Known issues + +File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
+ +Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file. + +#### Examples + +[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) + +#### References + +[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/cs/developing/developer-faqs.mdx b/website/pages/cs/developing/developer-faqs.mdx new file mode 100644 index 000000000000..0b925a79dce2 --- /dev/null +++ b/website/pages/cs/developing/developer-faqs.mdx @@ -0,0 +1,142 @@ +--- +title: Developer FAQs +--- + +## 1. What is a subgraph? + +A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using the Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available to be queried by subgraph consumers. + +## 2. Can I delete my subgraph? + +It is not possible to delete subgraphs once they are created. + +## 3. Can I change my subgraph name? + +No. Once a subgraph is created, the name cannot be changed. Make sure to think of this carefully before you create your subgraph so it is easily searchable and identifiable by other dapps. + +## 4. Can I change the GitHub account associated with my subgraph? + +No. Once a subgraph is created, the associated GitHub account cannot be changed. Make sure to think of this carefully before you create your subgraph. + +## 5. Am I still able to create a subgraph if my smart contracts don't have events? + +It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. 
Event handlers in the subgraph are triggered by contract events and are by far the fastest way to retrieve useful data. + +If the contracts you are working with do not contain events, your subgraph can use call and block handlers to trigger indexing. Although this is not recommended, as performance will be significantly slower. + +## 6. Is it possible to deploy one subgraph with the same name for multiple networks? + +You will need separate names for multiple networks. While you can't have different subgraphs under the same name, there are convenient ways of having a single codebase for multiple networks. Find more on this in our documentation: [Redeploying a Subgraph](/deploying/deploying-a-subgraph-to-hosted#redeploying-a-subgraph) + +## 7. How are templates different from data sources? + +Templates allow you to create data sources on the fly, while your subgraph is indexing. It might be the case that your contract will spawn new contracts as people interact with it, and since you know the shape of those contracts (ABI, events, etc) upfront you can define how you want to index them in a template and when they are spawned your subgraph will create a dynamic data source by supplying the contract address. + +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph#data-source-templates). + +## 8. How do I make sure I'm using the latest version of graph-node for my local deployments? + +You can run the following command: + +```sh +docker pull graphprotocol/graph-node:latest +``` + +**NOTE:** docker / docker-compose will always use whatever graph-node version was pulled the first time you ran it, so it is important to do this to make sure you are up to date with the latest version of graph-node. + +## 9. How do I call a contract function or access a public state variable from my subgraph mappings? 
+ +Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/developing/assemblyscript-api). + +## 10. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another datasource in `subgraph.yaml` after running `graph init`? + +Unfortunately, this is currently not possible. `graph init` is intended as a basic starting point, from which you can then add more data sources manually. + +## 11. I want to contribute or add a GitHub issue. Where can I find the open source repositories? + +- [graph-node](https://github.com/graphprotocol/graph-node) +- [graph-cli](https://github.com/graphprotocol/graph-cli) +- [graph-ts](https://github.com/graphprotocol/graph-ts) + +## 12. What is the recommended way to build "autogenerated" ids for an entity when handling events? + +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. + +## 13. When listening to multiple contracts, is it possible to select the contract order to listen to events? + +Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. + +## 14. Is it possible to differentiate between networks (mainnet, Goerli, local) from within event handlers? + +Yes. You can do this by importing `graph-ts` as per the example below: + +```javascript +import { dataSource } from '@graphprotocol/graph-ts' + +dataSource.network() +dataSource.address() +``` + +## 15. Do you support block and call handlers on Goerli? + +Yes. Goerli supports block handlers, call handlers and event handlers. 
It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. + +## 16. Can I import ethers.js or other JS libraries into my subgraph mappings? + +Not currently, as mappings are written in AssemblyScript. One possible alternative solution to this is to store raw data in entities and perform logic that requires JS libraries on the client. + +## 17. Is it possible to specify what block to start indexing on? + +Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created: Start blocks + +## 18. Are there some tips to increase the performance of indexing? My subgraph is taking a very long time to sync + +Yes, you should take a look at the optional start block feature to start indexing from the block that the contract was deployed: [Start blocks](/developing/creating-a-subgraph#start-blocks) + +## 19. Is there a way to query the subgraph directly to determine the latest block number it has indexed? + +Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: + +```sh +curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql +``` + +## 20. What networks are supported by The Graph? + +You can find the list of the supported networks [here](/developing/supported-networks). + +## 21. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? + +You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. + +## 22. Is this possible to use Apollo Federation on top of graph-node? 
+ +Federation is not supported yet, although we do want to support it in the future. At the moment, something you can do is use schema stitching, either on the client or via a proxy service. + +## 23. Is there a limit to how many objects The Graph can return per query? + +By default, query responses are limited to 100 items per collection. If you want to receive more, you can go up to 1000 items per collection and beyond that, you can paginate with: + +```graphql +someCollection(first: 1000, skip: ) { ... } +``` + +## 24. If my dapp frontend uses The Graph for querying, do I need to write my query key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? + +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. + +## 25. Where do I go to find my current subgraph on the Hosted Service? + +Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). + +## 26. Will the Hosted Service start charging query fees? + +The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. + +## 27. When will the Hosted Service be shut down? + +The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). 
All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). + +## 28. How do I update a subgraph on mainnet? + +If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/cs/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/cs/developing/substreams-powered-subgraphs-faq.mdx new file mode 100644 index 000000000000..854850b53954 --- /dev/null +++ b/website/pages/cs/developing/substreams-powered-subgraphs-faq.mdx @@ -0,0 +1,91 @@ +--- +title: Substreams-powered subgraphs FAQ +--- + +## What are Substreams? + +Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](/substreams/developers-guide/sink-targets/README/#substreams-sinks-overview) their data anywhere. + +Go to the [Substreams Documentation](/substreams/README/) to learn more about Substreams. + +## What are Substreams-powered subgraphs? 
+ +[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. + +If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. + +## How are Substreams-powered subgraphs different from subgraphs? + +Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. + +By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. + +## What are the benefits of using Substreams-powered subgraphs? + +Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](/substreams/developers-guide/creating-your-manifest/#module-definitions) to output to different [sinks](/substreams/developers-guide/sink-targets/#substreams-sinks-overview) such as PostgreSQL, MongoDB, and Kafka. 
+ +## What are the benefits of Substreams? + +There are many benefits to using Substreams, including: + +- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. + +- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). + +- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. + +- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. + +- Access to additional data which is not available as part of the JSON RPC + +- All the benefits of the Firehose. + +## What is the Firehose? + +Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. + +Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. + +## What are the benefits of the Firehose? + +There are many benefits to using Firehose, including: + +- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. + +- Prevents downtimes: Designed from the ground up for High Availability. + +- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. + +- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. 
+ +- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. + +## Where can developers access more information about Substreams-powered subgraphs and Substreams? + +The [Substreams documentation](/substreams/README/) will teach you how to build Substreams modules. + +The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. + +## What is the role of Rust modules in Substreams? + +Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. + +See [modules documentation](/substreams/developers-guide/modules/types/) for details. + +## What makes Substreams composable? + +When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. + +As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. + +## How can you build and deploy a Substreams-powered Subgraph? + +After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). + +## Where can I find examples of Substreams and Substreams-powered subgraphs? 
+ +You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. + +## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? + +The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. diff --git a/website/pages/cs/developing/supported-networks.mdx b/website/pages/cs/developing/supported-networks.mdx new file mode 100644 index 000000000000..328e5db99784 --- /dev/null +++ b/website/pages/cs/developing/supported-networks.mdx @@ -0,0 +1,48 @@ +--- +title: Supported Networks +--- + +import { getSupportedNetworks } from '@/src/getSupportedNetworks' + + + + + + + + + + + + {getSupportedNetworks().map((network) => ( + + + + + + + + + + ))} +
NetworkCLI NameChain IDHosted ServiceSubgraph StudioDecentralized NetworkSubstreams Support
{network.name} + {network.cliName} + {network.chainId}{network.supportedOnHosted ? '✓' : null}{network.supportedOnStudio ? '✓' : null}{network.supportedOnNetwork ? `✓${network.isBeta ? '*' : ''}` : null}{network.substreams ? '✓' : null}
+ +\*In beta. + +The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. + +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. + +Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. + +For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). + +Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. + +## Graph Node + +If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. + +Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. 
diff --git a/website/pages/cs/developing/unit-testing-framework.mdx b/website/pages/cs/developing/unit-testing-framework.mdx new file mode 100644 index 000000000000..8ffc66465e3a --- /dev/null +++ b/website/pages/cs/developing/unit-testing-framework.mdx @@ -0,0 +1,1099 @@ +--- +title: Unit Testing Framework +--- + +Matchstick is a unit testing framework, developed by [LimeChain](https://limechain.tech/), that enables subgraph developers to test their mapping logic in a sandboxed environment and deploy their subgraphs with confidence! + +## Getting Started + +### Install dependencies + +In order to use the test helper methods and run the tests, you will need to install the following dependencies: + +```sh +yarn add --dev matchstick-as +``` + +❗ `graph-node` depends on PostgreSQL, so if you don't already have it, you will need to install it. We highly advise using the commands below as adding it in any other way may cause unexpected errors! + +#### MacOS + +Postgres installation command: + +```sh +brew install postgresql +``` + +Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` + +```sh +ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib +``` + +#### Linux + +Postgres installation command (depends on your distro): + +```sh +sudo apt install postgresql +``` + +### WSL (Windows Subsystem for Linux) + +You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like + +``` +static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = +``` + +or + +``` +/node_modules/gluegun/build/index.js:13 throw up; +``` + +Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. 
For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running + +``` +sudo apt-get install libpq-dev +``` + +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as + +```json +{ + "name": "demo-subgraph", + "version": "0.1.0", + "scripts": { + "test": "graph test", + ... + }, + "dependencies": { + "@graphprotocol/graph-cli": "^0.30.0", + "@graphprotocol/graph-ts": "^0.27.0", + "matchstick-as": "^0.5.0" + } +} +``` + +### Usage + +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). + +### CLI options + +This will run all tests in the test folder: + +```sh +graph test +``` + +This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: + +```sh +graph test gravity +``` + +This will run only that specific test file: + +```sh +graph test path/to/file.test.ts +``` + +**Options:** + +```sh +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
+-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used +``` + +### Docker + +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. + +❗ If you have previously run `graph test` you may encounter the following error during docker build: + +```sh + error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied +``` + +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` + +### Configuration + +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: + +```yaml +testsFolder: path/to/tests +libsFolder: path/to/libs +manifestPath: path/to/subgraph.yaml +``` + +### Demo subgraph + +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) + +### Video tutorials + +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) + +## Tests structure (>=0.5.0) + +_**IMPORTANT: Requires matchstick-as >=0.5.0**_ + +### describe() + +`describe(name: String , () => {})` - Defines a test group. + +**_Notes:_** + +- _Describes are not mandatory. 
You can still use test() the old way, outside of the describe() blocks_ + +Example: + +```typescript +import { describe, test } from "matchstick-as/assembly/index" +import { handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar()", () => { + test("Should create a new Gravatar entity", () => { + ... + }) +}) +``` + +Nested `describe()` example: + +```typescript +import { describe, test } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar } from "../../src/gravity" + +describe("handleUpdatedGravatar()", () => { + describe("When entity exists", () => { + test("updates the entity", () => { + ... + }) + }) + + describe("When entity does not exists", () => { + test("it creates a new entity", () => { + ... + }) + }) +}) +``` + +--- + +### test() + +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. + +Example: + +```typescript +import { describe, test } from "matchstick-as/assembly/index" +import { handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar()", () => { + test("Should create a new Entity", () => { + ... + }) +}) +``` + +or + +```typescript +test("handleNewGravatar() should create a new entity", () => { + ... +}) + + +``` + +--- + +### beforeAll() + +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. + +Examples: + +Code inside `beforeAll` will execute once before _all_ tests in the file. + +```typescript +import { describe, test, beforeAll } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +import { Gravatar } from "../../generated/schema" + +beforeAll(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = “First Gravatar” + gravatar.save() + ... 
+}) + +describe("When the entity does not exist", () => { + test("it should create a new Gravatar with id 0x1", () => { + ... + }) +}) + +describe("When entity already exists", () => { + test("it should update the Gravatar with id 0x0", () => { + ... + }) +}) +``` + +Code inside `beforeAll` will execute once before all tests in the first describe block + +```typescript +import { describe, test, beforeAll } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +import { Gravatar } from "../../generated/schema" + +describe("handleUpdatedGravatar()", () => { + beforeAll(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = “First Gravatar” + gravatar.save() + ... + }) + + test("updates Gravatar with id 0x0", () => { + ... + }) + + test("creates new Gravatar with id 0x1", () => { + ... + }) +}) +``` + +--- + +### afterAll() + +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. + +Example: + +Code inside `afterAll` will execute once after _all_ tests in the file. + +```typescript +import { describe, test, afterAll } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +import { store } from "@graphprotocol/graph-ts" + +afterAll(() => { + store.remove("Gravatar", "0x0") + ... +}) + +describe("handleNewGravatar, () => { + test("creates Gravatar with id 0x0", () => { + ... + }) +}) + +describe("handleUpdatedGravatar", () => { + test("updates Gravatar with id 0x0", () => { + ... 
+ }) +}) +``` + +Code inside `afterAll` will execute once after all tests in the first describe block + +```typescript +import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar", () => { + afterAll(() => { + store.remove("Gravatar", "0x1") + ... + }) + + test("It creates a new entity with Id 0x0", () => { + ... + }) + + test("It creates a new entity with Id 0x1", () => { + ... + }) +}) + +describe("handleUpdatedGravatar", () => { + test("updates Gravatar with id 0x0", () => { + ... + }) +}) +``` + +--- + +### beforeEach() + +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. + +Examples: Code inside `beforeEach` will execute before each tests. + +```typescript +import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" +import { handleNewGravatars } from "./utils" + +beforeEach(() => { + clearStore() // <-- clear the store before each test in the file +}) + +describe("handleNewGravatars, () => { + test("A test that requires a clean store", () => { + ... + }) + + test("Second that requires a clean store", () => { + ... + }) +}) + + ... 
+``` + +Code inside `beforeEach` will execute only before each test in the that describe + +```typescript +import { describe, test, beforeEach } from 'matchstick-as/assembly/index' +import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity' + +describe('handleUpdatedGravatars', () => { + beforeEach(() => { + let gravatar = new Gravatar('0x0') + gravatar.displayName = 'First Gravatar' + gravatar.imageUrl = '' + gravatar.save() + }) + + test('Upates the displayName', () => { + assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') + + // code that should update the displayName to 1st Gravatar + + assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') + store.remove('Gravatar', '0x0') + }) + + test('Updates the imageUrl', () => { + assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') + + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + + assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') + store.remove('Gravatar', '0x0') + }) +}) +``` + +--- + +### afterEach() + +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. + +Examples: + +Code inside `afterEach` will execute after every test. + +```typescript +import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" + +beforeEach(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = “First Gravatar” + gravatar.save() +}) + +afterEach(() => { + store.remove("Gravatar", "0x0") +}) + +describe("handleNewGravatar", () => { + ... 
+}) + +describe("handleUpdatedGravatar", () => { + test("Upates the displayName", () => { + assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") + + // code that should update the displayName to 1st Gravatar + + assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") + }) + + test("Updates the imageUrl", () => { + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") + + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") + }) +}) +``` + +Code inside `afterEach` will execute after each test in that describe + +```typescript +import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" +import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" + +describe("handleNewGravatar", () => { + ... +}) + +describe("handleUpdatedGravatar", () => { + beforeEach(() => { + let gravatar = new Gravatar("0x0") + gravatar.displayName = "First Gravatar" + gravatar.imageUrl = "" + gravatar.save() + }) + + afterEach(() => { + store.remove("Gravatar", "0x0") + }) + + test("Upates the displayName", () => { + assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") + + // code that should update the displayName to 1st Gravatar + + assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") + }) + + test("Updates the imageUrl", () => { + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") + + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + + assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") + }) +}) +``` + +## Asserts + +```typescript +fieldEquals(entityType: string, id: string, fieldName: string, expectedVal: string) + +equals(expected: ethereum.Value, actual: ethereum.Value) + +notInStore(entityType: string, id: string) + +addressEquals(address1: Address, address2: Address) + +bytesEquals(bytes1: 
Bytes, bytes2: Bytes) + +i32Equals(number1: i32, number2: i32) + +bigIntEquals(bigInt1: BigInt, bigInt2: BigInt) + +booleanEquals(bool1: boolean, bool2: boolean) + +stringEquals(string1: string, string2: string) + +arrayEquals(array1: Array, array2: Array) + +tupleEquals(tuple1: ethereum.Tuple, tuple2: ethereum.Tuple) + +assertTrue(value: boolean) + +assertNull(value: T) + +assertNotNull(value: T) + +entityCount(entityType: string, expectedCount: i32) +``` + +## Write a Unit Test + +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). + +Assuming we have the following handler function (along with two helper functions to make our life easier): + +```typescript +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id.toHex()) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleNewGravatars(events: NewGravatar[]): void { + events.forEach((event) => { + handleNewGravatar(event) + }) +} + +export function createNewGravatarEvent( + id: i32, + ownerAddress: string, + displayName: string, + imageUrl: string, +): NewGravatar { + let mockEvent = newMockEvent() + let newGravatarEvent = new NewGravatar( + mockEvent.address, + mockEvent.logIndex, + mockEvent.transactionLogIndex, + mockEvent.logType, + mockEvent.block, + mockEvent.transaction, + mockEvent.parameters, + ) + newGravatarEvent.parameters = new Array() + let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) + let addressParam = new ethereum.EventParam( + 'ownderAddress', + ethereum.Value.fromAddress(Address.fromString(ownerAddress)), + ) + let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) + let imageUrlParam = new ethereum.EventParam('imageUrl', 
ethereum.Value.fromString(imageUrl)) + + newGravatarEvent.parameters.push(idParam) + newGravatarEvent.parameters.push(addressParam) + newGravatarEvent.parameters.push(displayNameParam) + newGravatarEvent.parameters.push(imageUrlParam) + + return newGravatarEvent +} +``` + +We first have to create a test file in our project. This is an example of how that might look like: + +```typescript +import { clearStore, test, assert } from 'matchstick-as/assembly/index' +import { Gravatar } from '../../generated/schema' +import { NewGravatar } from '../../generated/Gravity/Gravity' +import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' + +test('Can call mappings with custom events', () => { + // Create a test entity and save it in the store as initial state (optional) + let gravatar = new Gravatar('gravatarId0') + gravatar.save() + + // Create mock events + let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + + // Call mapping functions passing the events we just created + handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) + + // Assert the state of the store + assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') + assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') + assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') + + // Clear the store in order to start the next test off on a clean slate + clearStore() +}) + +test('Next test', () => { + //... +}) +``` + +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). 
`matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: + +- We're setting up our initial state and adding one custom Gravatar entity; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; +- We assert the state of the store. How does that work? - We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. + +There we go - we've created our first test! 👏 + +Now in order to run our tests you simply need to run the following in your subgraph root folder: + +`graph test Gravity` + +And if all goes well you should be greeted with the following: + +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) + +## Common test scenarios + +### Hydrating the store with a certain state + +Users are able to hydrate the store with a known set of entities. 
Here's an example to initialise the store with a Gravatar entity: + +```typescript +let gravatar = new Gravatar('entryId') +gravatar.save() +``` + +### Calling a mapping function with an event + +A user can create a custom event and pass it to a mapping function that is bound to the store: + +```typescript +import { store } from 'matchstick-as/assembly/store' +import { NewGravatar } from '../../generated/Gravity/Gravity' +import { handleNewGravatars, createNewGravatarEvent } from './mapping' + +let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + +handleNewGravatar(newGravatarEvent) +``` + +### Calling all of the mappings with event fixtures + +Users can call the mappings with test fixtures. + +```typescript +import { NewGravatar } from '../../generated/Gravity/Gravity' +import { store } from 'matchstick-as/assembly/store' +import { handleNewGravatars, createNewGravatarEvent } from './mapping' + +let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + +let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + +handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) +``` + +``` +export function handleNewGravatars(events: NewGravatar[]): void { + events.forEach(event => { + handleNewGravatar(event); + }); +} +``` + +### Mocking contract calls + +Users can mock contract calls: + +```typescript +import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index' +import { Gravity } from '../../generated/Gravity/Gravity' +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') +let expectedResult = Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947') +let bigIntParam = BigInt.fromString('1234') +createMockedFunction(contractAddress, 
'gravatarToOwner', 'gravatarToOwner(uint256):(address)') + .withArgs([ethereum.Value.fromSignedBigInt(bigIntParam)]) + .returns([ethereum.Value.fromAddress(Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947'))]) + +let gravity = Gravity.bind(contractAddress) +let result = gravity.gravatarToOwner(bigIntParam) + +assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result)) +``` + +As demonstrated, in order to mock a contract call and hardcode a return value, the user must provide a contract address, function name, function signature, an array of arguments, and of course - the return value. + +Users can also mock function reverts: + +```typescript +let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') +createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(string,string)') + .withArgs([ethereum.Value.fromAddress(contractAddress)]) + .reverts() +``` + +### Mocking IPFS files (from matchstick 0.4.1) + +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. 
+ +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstick to detect it, like the `processGravatar()` function in the test example below: + +`.test.ts` file: + +```typescript +import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' +import { ipfs } from '@graphprotocol/graph-ts' +import { gravatarFromIpfs } from './utils' + +// Export ipfs.map() callback in order for matchstick to detect it +export { processGravatar } from './utils' + +test('ipfs.cat', () => { + mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json') + + assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) + + gravatarFromIpfs() + + assert.entityCount(GRAVATAR_ENTITY_TYPE, 1) + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg') + + clearStore() +}) + +test('ipfs.map', () => { + mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json') + + assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) + + ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json']) + + assert.entityCount(GRAVATAR_ENTITY_TYPE, 3) + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1') + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2') + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3') +}) +``` + +`utils.ts` file: + +```typescript +import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" +import { Gravatar } from "../../generated/schema" + +... 
+ +// ipfs.map callback +export function processGravatar(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + + if (!id) { + return + } + + // Callbacks can also create entities + let gravatar = new Gravatar(id.toString()) + gravatar.displayName = userData.toString() + id.toString() + gravatar.save() +} + +// function that calls ipfs.cat +export function gravatarFromIpfs(): void { + let rawData = ipfs.cat("ipfsCatfileHash") + + if (!rawData) { + return + } + + let jsonData = json.fromBytes(rawData as Bytes).toObject() + + let id = jsonData.get('id') + let url = jsonData.get("imageUrl") + + if (!id || !url) { + return + } + + let gravatar = new Gravatar(id.toString()) + gravatar.imageUrl = url.toString() + gravatar.save() +} +``` + +### Asserting the state of the store + +Users are able to assert the final (or midway) state of the store through asserting entities. In order to do this, the user has to supply an Entity type, the specific ID of an Entity, a name of a field on that Entity, and the expected value of the field. Here's a quick example: + +```typescript +import { assert } from 'matchstick-as/assembly/index' +import { Gravatar } from '../generated/schema' + +let gravatar = new Gravatar('gravatarId0') +gravatar.save() + +assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') +``` + +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. + +### Interacting with Event metadata + +Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. 
The following example shows how you can read/write to those fields on the Event object: + +```typescript +// Read +let logType = newGravatarEvent.logType + +// Write +let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A' +newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) +``` + +### Asserting variable equality + +```typescript +assert.equals(ethereum.Value.fromString("hello"), ethereum.Value.fromString("hello")); +``` + +### Asserting that an Entity is **not** in the store + +Users can assert that an entity does not exist in the store. The function takes an entity type and an id. If the entity is in fact in the store, the test will fail with a relevant error message. Here's a quick example of how to use this functionality: + +```typescript +assert.notInStore('Gravatar', '23') +``` + +### Printing the whole store (for debug purposes) + +You can print the whole store to the console using this helper function: + +```typescript +import { logStore } from 'matchstick-as/assembly/store' + +logStore() +``` + +### Expected failure + +Users can have expected test failures, using the shouldFail flag on the test() functions: + +```typescript +test( + 'Should throw an error', + () => { + throw new Error() + }, + true, +) +``` + +If the test is marked with shouldFail = true but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Also, if it's marked with shouldFail = false (the default state), the test executor will crash. + +### Logging + +Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from matchstick-as rather than graph-ts. Here's a simple example with all non-critical log types: + +```typescript +import { test } from "matchstick-as/assembly/index"; +import { log } from "matchstick-as/assembly/log"; + +test("Success", () => { + log.success("Success!",
[]); +}); +test("Error", () => { + log.error("Error :( ", []); +}); +test("Debug", () => { + log.debug("Debugging...", []); +}); +test("Info", () => { + log.info("Info!", []); +}); +test("Warning", () => { + log.warning("Warning!", []); +}); +``` + +Users can also simulate a critical failure, like so: + +```typescript +test('Blow everything up', () => { + log.critical('Boom!') +}) +``` + +Logging critical errors will stop the execution of the tests and blow everything up. After all - we want to make sure your code doesn't have critical logs in deployment, and you should notice right away if that were to happen. + +### Testing derived fields + +Testing derived fields is a feature which (as the example below shows) allows the user to set a field in a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. Important thing to note is that the first entity needs to be reloaded as the automatic update happens in the store in rust of which the AS code is agnostic. + +```typescript +test('Derived fields example test', () => { + let mainAccount = new GraphAccount('12') + mainAccount.save() + let operatedAccount = new GraphAccount('1') + operatedAccount.operators = ['12'] + operatedAccount.save() + let nst = new NameSignalTransaction('1234') + nst.signer = '12' + nst.save() + + assert.assertNull(mainAccount.get('nameSignalTransactions')) + assert.assertNull(mainAccount.get('operatorOf')) + + mainAccount = GraphAccount.load('12')! + + assert.i32Equals(1, mainAccount.nameSignalTransactions.length) + assert.stringEquals('1', mainAccount.operatorOf[0]) +}) +``` + +### Testing dynamic data sources + +Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace.
These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). + +Example below: + +First we have the following event handler (which has been intentionally repurposed to showcase datasource mocking): + +```typescript +export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void { + let tokenLockWallet = TokenLockWallet.load(dataSource.address().toHexString())! + if (dataSource.network() == 'rinkeby') { + tokenLockWallet.tokenDestinationsApproved = true + } + let context = dataSource.context() + if (context.get('contextVal')!.toI32() > 0) { + tokenLockWallet.setBigInt('tokensReleased', BigInt.fromI32(context.get('contextVal')!.toI32())) + } + tokenLockWallet.save() +} +``` + +And then we have the test using one of the methods in the dataSourceMock namespace to set a new return value for all of the dataSource functions: + +```typescript +import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index' +import { BigInt, DataSourceContext, Value } from '@graphprotocol/graph-ts' + +import { handleApproveTokenDestinations } from '../../src/token-lock-wallet' +import { ApproveTokenDestinations } from '../../generated/templates/GraphTokenLockWallet/GraphTokenLockWallet' +import { TokenLockWallet } from '../../generated/schema' + +test('Data source simple mocking example', () => { + let addressString = '0xA16081F360e3847006dB660bae1c6d1b2e17eC2A' + let address = Address.fromString(addressString) + + let wallet = new TokenLockWallet(address.toHexString()) + wallet.save() + let context = new DataSourceContext() + 
context.set('contextVal', Value.fromI32(325)) + dataSourceMock.setReturnValues(addressString, 'rinkeby', context) + let event = changetype(newMockEvent()) + + assert.assertTrue(!wallet.tokenDestinationsApproved) + + handleApproveTokenDestinations(event) + + wallet = TokenLockWallet.load(address.toHexString())! + assert.assertTrue(wallet.tokenDestinationsApproved) + assert.bigIntEquals(wallet.tokensReleased, BigInt.fromI32(325)) + + dataSourceMock.resetValues() +}) +``` + +Notice that dataSourceMock.resetValues() is called at the end. That's because the values are remembered when they are changed and need to be reset if you want to go back to the default values. + +## Test Coverage + +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. + +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. + +### Prerequisites + +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: + +#### Export your handlers + +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. 
So for instance in our example, in our gravity.test.ts file we have the following handler being imported: + +```typescript +import { handleNewGravatar } from '../../src/gravity' +``` + +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: + +```typescript +export { handleNewGravatar } +``` + +### Usage + +Once that's all set up, to run the test coverage tool, simply run: + +```sh +graph test -- -c +``` + +You could also add a custom `coverage` command to your `package.json` file, like so: + +```typescript + "scripts": { + /.../ + "coverage": "graph test -- -c" + }, +``` + +That will execute the coverage tool and you should see something like this in the terminal: + +```sh +$ graph test -c +Skipping download/install step because binary already exists at /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0 + +___ ___ _ _ _ _ _ +| \/ | | | | | | | (_) | | +| . . | __ _| |_ ___| |__ ___| |_ _ ___| | __ +| |\/| |/ _` | __/ __| '_ \/ __| __| |/ __| |/ / +| | | | (_| | || (__| | | \__ \ |_| | (__| < +\_| |_/\__,_|\__\___|_| |_|___/\__|_|\___|_|\_\ + +Compiling... + +Running in coverage report mode. + ️ +Reading generated test modules... 🔎️ + +Generating coverage report 📝 + +Handlers for source 'Gravity': +Handler 'handleNewGravatar' is tested. +Handler 'handleUpdatedGravatar' is not tested. +Handler 'handleCreateGravatar' is tested. +Test coverage: 66.7% (2/3 handlers). + +Handlers for source 'GraphTokenLockWallet': +Handler 'handleTokensReleased' is not tested. +Handler 'handleTokensWithdrawn' is not tested. +Handler 'handleTokensRevoked' is not tested. +Handler 'handleManagerUpdated' is not tested. +Handler 'handleApproveTokenDestinations' is not tested. +Handler 'handleRevokeTokenDestinations' is not tested. +Test coverage: 0.0% (0/6 handlers). + +Global test coverage: 22.2% (2/9 handlers). 
+``` + +### Test run time duration in the log output + +The log output includes the test run duration. Here's an example: + +`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` + +## Common compiler errors + +> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined + +This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/developing/assemblyscript-api/#logging-api) + +> ERROR TS2554: Expected ? arguments, but got ?. +> +> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt); +> +> in ~lib/matchstick-as/assembly/defaults.ts(18,12) +> +> ERROR TS2554: Expected ? arguments, but got ?. +> +> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); +> +> in ~lib/matchstick-as/assembly/defaults.ts(24,12) + +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. + +## Feedback + +If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. 
diff --git a/website/pages/cs/docsearch.json b/website/pages/cs/docsearch.json new file mode 100644 index 000000000000..8cfff967936d --- /dev/null +++ b/website/pages/cs/docsearch.json @@ -0,0 +1,42 @@ +{ + "button": { + "buttonText": "Search", + "buttonAriaLabel": "Search" + }, + "modal": { + "searchBox": { + "resetButtonTitle": "Clear the query", + "resetButtonAriaLabel": "Clear the query", + "cancelButtonText": "Cancel", + "cancelButtonAriaLabel": "Cancel" + }, + "startScreen": { + "recentSearchesTitle": "Recent", + "noRecentSearchesText": "No recent searches", + "saveRecentSearchButtonTitle": "Save this search", + "removeRecentSearchButtonTitle": "Remove this search from history", + "favoriteSearchesTitle": "Favorite", + "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + }, + "errorScreen": { + "titleText": "Unable to fetch results", + "helpText": "You might want to check your network connection." + }, + "footer": { + "selectText": "to select", + "selectKeyAriaLabel": "Enter key", + "navigateText": "to navigate", + "navigateUpKeyAriaLabel": "Arrow up", + "navigateDownKeyAriaLabel": "Arrow down", + "closeText": "to close", + "closeKeyAriaLabel": "Escape key", + "searchByText": "Search by" + }, + "noResultsScreen": { + "noResultsText": "No results for", + "suggestedQueryText": "Try searching for", + "reportMissingResultsText": "Believe this query should return results?", + "reportMissingResultsLinkText": "Let us know." + } + } +} diff --git a/website/pages/cs/global.json b/website/pages/cs/global.json new file mode 100644 index 000000000000..6a3eb234bfce --- /dev/null +++ b/website/pages/cs/global.json @@ -0,0 +1,14 @@ +{ + "collapse": "Collapse", + "expand": "Expand", + "previous": "Previous", + "next": "Next", + "editPage": "Edit page", + "pageSections": "Page Sections", + "linkToThisSection": "Link to this section", + "technicalLevelRequired": "Technical Level Required", + "notFoundTitle": "Oops! 
This page was lost in space...", + "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "goHome": "Go Home", + "video": "Video" +} diff --git a/website/pages/cs/glossary.mdx b/website/pages/cs/glossary.mdx new file mode 100644 index 000000000000..2e840513f1ea --- /dev/null +++ b/website/pages/cs/glossary.mdx @@ -0,0 +1,89 @@ +--- +title: Glossary +--- + +- **The Graph**: A decentralized protocol for indexing and querying data. + +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. + +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. + +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. + +- **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. + +- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. + +- **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. + +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. + + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + + 2. 
**Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + +- **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. + +- **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. + +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. + +- **Curators**: Network participants that identify high-quality subgraphs, and “curate” them (i.e., signal GRT on them) in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. Indexers earn indexing rewards proportional to the signal on a subgraph. We see a correlation between the amount of GRT signalled and the number of Indexers indexing a subgraph. + +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. + +- **Subgraph Consumer**: Any application or user that queries a subgraph. + +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. + +- **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. + +- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. + +- **Epoch**: A unit of time in the network. One epoch is currently 6,646 blocks or approximately 1 day.
+ +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. + + 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. + + 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. + + 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. + +- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. 
+ +- **Arbitrators**: Arbitrators are network participants set via governance. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. + +- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. + +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. + +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. + +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. + +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. + +- **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. + +- **Indexer agent**: The Indexer agent is part of the indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. + +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way.
+ +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. + +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. + +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. + +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. + +- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. + +- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. + +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/cs/graphcast.mdx b/website/pages/cs/graphcast.mdx new file mode 100644 index 000000000000..e397aad36e43 --- /dev/null +++ b/website/pages/cs/graphcast.mdx @@ -0,0 +1,21 @@ +--- +title: Graphcast +--- + +## Introduction + +Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? + +Currently, the cost to broadcast information to other network participants is determined by gas fees on the Ethereum blockchain. Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. 
that the message is valid and signed by a known protocol participant) with an open design space of reputation models. + +The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: + +- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. 
+ +### Learn More + +If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) diff --git a/website/pages/cs/index.json b/website/pages/cs/index.json new file mode 100644 index 000000000000..9e28e13d5001 --- /dev/null +++ b/website/pages/cs/index.json @@ -0,0 +1,77 @@ +{ + "title": "Get Started", + "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "shortcuts": { + "aboutTheGraph": { + "title": "About The Graph", + "description": "Learn more about The Graph" + }, + "quickStart": { + "title": "Quick Start", + "description": "Jump in and start with The Graph" + }, + "developerFaqs": { + "title": "Developer FAQs", + "description": "Frequently asked questions" + }, + "queryFromAnApplication": { + "title": "Query from an Application", + "description": "Learn to query from an application" + }, + "createASubgraph": { + "title": "Create a Subgraph", + "description": "Use Studio to create subgraphs" + }, + "migrateFromHostedService": { + "title": "Migrate from the Hosted Service", + "description": "Migrating subgraphs to The Graph Network" + } + }, + "networkRoles": { + "title": "Network Roles", + "description": "Learn about The Graph’s network roles.", + "roles": { + "developer": { + "title": "Developer", + "description": "Create a subgraph or use existing subgraphs in a dapp" + }, + "indexer": { + "title": "Indexer", + "description": "Operate a node to index data and serve queries" + }, + "curator": { + "title": "Curator", + "description": "Organize data by signaling on subgraphs" + }, + "delegator": { + "title": "Delegator", + "description": "Secure the network by delegating GRT to Indexers" + } + } + }, + "readMore": "Read more", + "products": { + "title": "Products", + "products": { + "subgraphStudio": { + "title": "Subgraph Studio", + "description": "Create, manage and publish subgraphs and API keys" + }, + "graphExplorer": { + "title": 
"Graph Explorer", + "description": "Explore subgraphs and interact with the protocol" + }, + "hostedService": { + "title": "Hosted Service", + "description": "Create and explore subgraphs on the Hosted Service" + } + } + }, + "supportedNetworks": { + "title": "Supported Networks", + "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", + "graphNetworkAndHostedService": "The Graph Network & Hosted Service", + "hostedService": "Hosted Service", + "betaWarning": "In beta." + } +} diff --git a/website/pages/cs/managing/_meta.js b/website/pages/cs/managing/_meta.js new file mode 100644 index 000000000000..a7c7b3d79464 --- /dev/null +++ b/website/pages/cs/managing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/managing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/managing/deprecating-a-subgraph.mdx b/website/pages/cs/managing/deprecating-a-subgraph.mdx new file mode 100644 index 000000000000..e6adfccad368 --- /dev/null +++ b/website/pages/cs/managing/deprecating-a-subgraph.mdx @@ -0,0 +1,18 @@ +--- +title: Deprecating a Subgraph +--- + +So you'd like to deprecate your subgraph on The Graph Explorer. You've come to the right place! Follow the steps below: + +1. Visit the contract address [here](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) +2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. +3. Voilà! Your subgraph will no longer show up on searches on The Graph Explorer. + +Please note the following: + +- The `deprecateSubgraph` function should be called by the owner's wallet. +- Curators will not be able to signal on the subgraph anymore. +- Curators that already signaled on the subgraph will be able to withdraw their signal at an average share price. +- Deprecated subgraphs will be indicated with an error message. 
+ +If you interacted with the deprecated subgraph, you'll be able to find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/cs/managing/transferring-subgraph-ownership.mdx b/website/pages/cs/managing/transferring-subgraph-ownership.mdx new file mode 100644 index 000000000000..1ca1c621a9c9 --- /dev/null +++ b/website/pages/cs/managing/transferring-subgraph-ownership.mdx @@ -0,0 +1,39 @@ +--- +title: Transferring Subgraph Ownership +--- + +The Graph supports the transfer of the ownership of a subgraph. + +When you deploy a subgraph to mainnet, an NFT will be minted to the address that deployed the subgraph. The NFT is based on a standard ERC721, so it can be easily transferred to different accounts. + +Whoever owns the NFT controls the subgraph. If the owner decides to sell the NFT, or transfer it, they will no longer be able to make edits or updates to that subgraph on the network. + +In addition to adding more flexibility to the development lifecycle, this functionality makes certain use cases more convenient, such as moving your control to a multisig or a community member creating it on behalf of a DAO. 
+ +## Viewing your subgraph as an NFT + +To view your subgraph as an NFT, you can visit an NFT marketplace like OpenSea: + +``` +https://opensea.io/your-wallet-address +``` + +Or a wallet explorer like **Rainbow.me**: + +``` +https://rainbow.me/your-wallet-address +``` + +## Transferring ownership of a subgraph + +To transfer ownership of a subgraph, you can use the UI built into Subgraph Studio: + +![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) + +And then choose the address that you would like to transfer the subgraph to: + +![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) + +You can also use the built-in UI of NFT marketplaces like OpenSea: + +![Subgraph Ownership Transfer from NFT marketplace](/img/subgraph-ownership-transfer-nft-marketplace.png) diff --git a/website/pages/cs/mips-faqs.mdx b/website/pages/cs/mips-faqs.mdx new file mode 100644 index 000000000000..73efe82662cb --- /dev/null +++ b/website/pages/cs/mips-faqs.mdx @@ -0,0 +1,125 @@ +--- +title: MIPs FAQs +--- + +## Introduction + +It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. + +To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). + +The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer.
+ +The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. + +### Useful Resources + +- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) +- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) +- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) +- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) + +### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? + +Yes, it is indeed. + +For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. + +A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). + +### 2. Which chain will the MIPs program incentivise first? + +The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. + +### 3. How will new chains be added to the MIPs program? + +New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. 
Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. + +### 4. How will we know when the network is ready for a new chain? + +The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. + +### 5. How are rewards divided per chain? + +Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. + +### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? + +You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. + +### 7. When will rewards be distributed? + +MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. + +### 8. 
How does scoring work? + +Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: + +**Subgraph Coverage** + +- Are you providing maximal support for subgraphs per chain? + +- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. + +**Quality Of Service** + +- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? + +- Is the Indexer supporting dapp developers being reactive to their needs? + +- Is Indexer allocating efficiently, contributing to the overall health of the network? + +**Community Support** + +- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? + +- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? + +### 9. How will the Discord role be assigned? + +Moderators will assign the roles in the next few days. + +### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? + +Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. + +### 11. At what point do you expect participants to add a mainnet deployment? + +There will be a requirement to have a mainnet indexer during phase 3. More information on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) + +### 12. Will rewards be subject to vesting? + +The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. + +### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? + +Yes + +### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? 
+ +Yes + +### 15. During the MIPs program, will there be a period to dispute invalid POI? + +To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation + +### 17. Can we combine two vesting contracts? + +No. The options are: you can delegate one to the other one or run two separate indexers. + +### 18. KYC Questions? + +Please email info@thegraph.foundation + +### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? + +Yes + +### 20. Are there recommended regions to run the servers? + +We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. + +### 21. What is “handler gas cost”? + +It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/cs/network-transition-faq.mdx b/website/pages/cs/network-transition-faq.mdx new file mode 100644 index 000000000000..1826dd4dfb73 --- /dev/null +++ b/website/pages/cs/network-transition-faq.mdx @@ -0,0 +1,245 @@ +--- +title: Network Transition FAQ +--- + +Developers will have plenty of time to migrate their subgraphs to the decentralized network. Exact timelines will vary from network to network based on Indexer and network readiness-the hosted service will not end support for all networks at once and will not be sunset abruptly. + +Each network on the hosted service, including Ethereum, will sunset gradually as it is supported on the decentralized network to achieve feature parity and a high quality of service. This will happen on a network-to-network basis with help from Indexers in the [MIPs program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program/), to enable full support for each network on the decentralized network. 
+ +To add more clarity around continued support for each network on the hosted service, these FAQs answer common questions regarding the specifics of the network transition process. If you would like to start the subgraph migration process now, here is a [step-by-step guide](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph). To skip to the migration FAQ, [click here](#migration-faqs). + +## Hosted Service Sunset FAQs + +### Will I have to migrate my subgraph before the decentralized network serves core functionalities for subgraphs? + +Subgraph developers can begin migrating their Ethereum mainnet subgraphs now, but will not be forced to migrate subgraphs to the network before feature core functionality exists for the decentralized network and hosted service. Migration of Gnosis network subgraphs will also begin soon, with other networks to follow once Indexers have tested the networks and are ready to index them in production. + +### What is the timeline and process for deprecating the hosted service? + +All networks will have their own timelines, depending on when they are enabled on the network and the timeline it takes to get through each phase. Core developers are working to migrate the majority of hosted service traffic to the decentralized network as soon as possible. + +Most importantly, you will not lose access to the hosted service before core functionality is available for your specific network/subgraph on the decentralized network. + +The three distinct phases of hosted service deprecation for each network are: + +#### Phase 1 (The Sunray): Disable new subgraph creation for blockchains that have quality parity on the network + +In this stage, developers will no longer be able to deploy new subgraphs to the hosted service for that network. Developers will still be able to update existing subgraphs on the hosted service. + +No network has yet begun Phase 1 of transitioning from the hosted service to the decentralized network. 
+ +As networks enter Phase 1, please note that developers can still use the rate limited Developer Preview URL in the Subgraph Studio to develop and test their subgraphs (up to 1,000 free queries) without acquiring GRT or interacting with protocol economics. + +#### Phase 2 (The Sunbeam): Disable subgraph updates + +In this phase, updates to subgraphs must be made through Subgraph Studio and subsequently published to the decentralized network. Hosted service subgraphs for networks in this phase will still exist and will be queryable, but updates to subgraphs must be made on The Graph's decentralized network. + +There are no exact timelines for when any network will move to this phase, as the process is driven by exit criteria surrounding core functionality, not dates. + +#### Phase 3 (The Sunrise): Disable querying subgraphs + +At this phase, subgraphs on the hosted service for networks supported by The Graph Network will no longer process queries. The only way to query blockchain data for subgraphs on networks in this phase will be through the decentralized network. Test queries will still be available in [Subgraph Studio](https://thegraph.com/studio/) via the Development Query URL. + +Networks will not move to Phase 3 until successfully moving to Phase 2 and giving developers ample time to migrate to the decentralized network. + +![subgraph chart](/img/subgraph-chart.png) + +> Note: This diagram reflects the per-network sunsetting process. Hosted service sunsetting times will vary and will not sunset all at once. + +### What happens to test networks like Goerli, Mumbai, etc? + +All networks and test networks are eligible for a free Deployment Query URL in the [Subgraph Studio](https://thegraph.com/studio/). This URL is rate limited and intended for test and development traffic. Production traffic will require a subgraph published to The Graph Network in order to have production grade redundancy and stability. 
+ +![Rate limit](/img/rate-limit.png) + +### Does The Graph Network have the same functionalities as the hosted service? + +Indexers on The Graph Network run the most recent network-approved [release of Graph Node](https://github.com/graphprotocol/graph-node/releases), and can support any subgraph features supported in that release. + +Sometimes unreleased features which are still under development might be available first on the Developer Preview URL, which runs the latest main commit of [Graph Node](https://github.com/graphprotocol/graph-node). These features will then become available on the network with the next Graph Node release. + +Certain subgraph features are not eligible for indexing rewards, if they are not deterministic or verifiable on the network. Specific examples are fetching files from IPFS, and indexing networks not yet supported on The Graph Network. + +Subgraphs with these features can be published to the network, but they may not be picked up by Indexers. However, subgraphs with sufficient signal may still attract Indexers interested in collecting query fees, which any subgraph is eligible for. + +### How much does The Graph Network cost in comparison to running my own infrastructure? + +The Graph's decentralized network is 60-90% less expensive than running dedicated infrastructure, as shown in [these case studies](https://thegraph.com/docs/en/network/benefits/#low-volume-user-less-than-30000-queries-per-month). + +### Is there anything I should do with my hosted service subgraph after I migrate to the network? + +Hiding your hosted service subgraph is strongly recommended to avoid confusion. [This video](https://www.loom.com/share/7cffd2a7845e4fbd8c51f45c516cb7f9) walks through the process. + +### When will the decentralized network support my preferred network? 
+ +There is no set timeline per network, they will be dictated by Indexer readiness via the [MIPs program](https://thegraph.com/migration-incentive-program/) where new networks are tested by Indexers. As new networks are supported on the network, users will receive ample notification to prepare for migration. Core devs and contributors to The Graph ecosystem are working to implement support for more networks as soon as possible. + +### Is Ethereum mainnet entering Phase 1 of the network transition process? + +While Ethereum was initially anticipated to begin transition off of the hosted service by the end of Q3 2022, this has been [postponed](https://thegraph.com/blog/transitioning-to-decentralized-graph-network) to address user feedback. Additional improvements to user experience, billing, and other fulfillments of user requests will drive Ethereum's hosted service transition timeline. Stay up to date on when Ethereum will enter The Sunray phase via the integration status tracker below and via [The Graph Twitter.](http://www.twitter.com/graphprotocol) + +### The Graph Network integration status tracker + +The table below illustrates where each network is in the network integration process. If your preferred network is not yet listed, integration has not yet begun, and that network is still fully supported by The Graph's hosted service. + +> This table will not include test networks, which remain free in [Subgraph Studio](https://thegraph.com/studio/). 
+ +| Network | Announcing integration on The Graph Network | Network Integration complete | Phase 1: disable new subgraphs on hosted service | Phase 2: disable subgraph updates on hosted service | Phase 3: disable subgraphs on hosted service | +| --- | :-: | :-: | :-: | :-: | :-: | +| Ethereum | ✓ | ✓ | | | | +| Gnosis (formerly xDAI) | ✓ | ✓\* | | | | +| Polygon | ✓ | | | | | +| Celo | ✓ | ✓\* | | | | +| Arbitrum One | ✓ | ✓\* | | | | +| Avalanche | ✓ | ✓\* | | | | +| Optimism | ✓ | | | | | +| Fantom | ✓ | | | | | + +\* The network is currently in beta on The Graph's decentralized network. + +## Query Fees, API Keys, and Billing FAQs + +### How are query fees priced? + +Query fee prices are impacted by query demand on the decentralized network. Core developers created a query pricing cost model language called [Agora](https://github.com/graphprotocol/agora). It enables Indexers to price queries efficiently. Learn more in the [Agora documentation](https://github.com/graphprotocol/agora/blob/master/docs/README.md). + +### How can I set a maximum query budget? + +Users can set a max query budget in the Subgraph Studio [API Key](https://thegraph.com/studio/apikeys/) section, under the Budget tab. [Watch this video](https://www.loom.com/share/b5fc533e48584cb694017392c80c75e0) for an overview of that process, as well as adjusting other parts of your API Key. + +Please note that setting your max query budget too low will exclude Indexers, potentially leading to poor quality service in the form of failed queries, slow queries, etc. + +As of the end of September 2022, it's best practice to stay within the $0.00035-$0.0004 range as the lowest max query budget. + +### How can I protect my API Key? + +Users are encouraged to restrict the API key by both subgraph and domain in the [Subgraph Studio](https://thegraph.com/studio/): + +![Restrict domain](/img/restrictdomain.png) + +### How do I fill up my API key to pay for query fees? 
+ +You can fill up your billing balance in the Subgraph Studio [Billing Dashboard](https://thegraph.com/studio/billing/) by pressing the "Add GRT" button. There is ongoing work to improve this experience to add more seamless and recurring payments. + +[This video](https://www.loom.com/share/a81de6ef11d64c62872ea210c58c6af5) has an overview of that process. + +### How do I set alerts for low billing balances in my API key? + +Users should set a billing alert to their email address [here](https://thegraph.com/studio/settings/). + +Also, a banner will flash within a user's UI to warn when a billing balance is getting low. + +### What are the best practices for managing my API key settings? + +A max query budget of $0.0004 is recommended to maintain low average query prices while maintaining high quality of service. This can be done in the budget billing tab of the [API Key section](https://thegraph.com/studio/apikeys/). + +## Migration FAQs + +### How can I migrate my subgraph to The Graph's decentralized network? + +Learn how to migrate your subgraph to The Graph Network with this simple [step-by-step guide](https://thegraph.com/blog/how-to-migrate-ethereum-subgraph) or [this video](https://www.youtube.com/watch?v=syXwYEk-VnU&t=1s). + +### Are there Network Grants for subgraphs that migrate early to The Graph Network? + +Yes. To apply for a Network Grant, reach out [here](mailto:migration@thegraph.foundation). + +### Is there any financial/technical/marketing support through the migration process from The Graph ecosystem? 
+ +There are Network Grants for projects to use to curate subgraphs (to attract Indexers) and pay for initial query fees (apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com)), a [direct channel](http://thegraph.com/discord) to engineers to help every step of the way, and prioritized marketing campaigns to showcase your project after migration, exampled in these Twitter threads: [1](https://twitter.com/graphprotocol/status/1496891582401814537), [2](https://twitter.com/graphprotocol/status/1491926128302379008), & [3](https://twitter.com/graphprotocol/status/1491126245396201473). + +### How long do queries take? + +Queries take an average of 150-300 milliseconds on the decentralized network. + +### Is the billing process on The Graph Network more complex than on the hosted service? + +Yes, the UX for the network is not yet at quality parity with the hosted service. The billing UX, in particular, is still in very early stages and there are many moving parts that the core dev teams are working to abstract away from the process. Much of these improvements will be made public in the near future. + +### Can I pay for The Graph Network queries in fiat, credit card, or stablecoins? + +In the coming months, the number of steps that users need to take to pay for their subgraphs will be vastly reduced. While payments will still be made in GRT, efforts to implement a fiat on-ramp and automated payment systems to convert fiat and crypto into GRT to make recurring payments are already underway. + +### Will the network ever have the same UX as the hosted service? + +While there is still work to do, the aim is to offer comparable if not better quality UX on The Graph Network than currently exists on the hosted service. Short term, the aim is to offer a more streamlined and predictable billing experience that helps users focus more time building high-quality dapps. 
+ +### How can I ensure that my subgraph will be picked up by Indexer on The Graph Network? + +It is recommended to curate with at least 10,000 GRT, which users can do in the same transaction as when they publish. Users can also ask the curation community to curate their subgraph [here](https://t.me/CurationStation). + +There are Network Grants for the early migrants to cover these initial costs. Feel free to apply [here](mailto:migration@thegraph.foundation). + +### Why does a subgraph need curation signal? What if there isn't enough signal on my subgraph from curators? + +The higher the curation signal, the more attractive a subgraph is to Indexers, as there is a linear correlation between higher signal and higher indexing rewards. Without curation, there is no incentive for Indexers to pick up a subgraph. + +### What happens to the GRT a subgraph developer uses for curation signal? + +If you are the first to signal a subgraph, your GRT signaled amount will not go down. GRT used for curation can be removed later. Also, Curators get 10% of all query fees taken in by Indexers. + +### What improvements are coming to the curation process? + +Short term, the initial curation model on Arbitrum will provide principle-protection to curation signal. Longer term, the core devs will prioritize offering developers the capacity to rent curation signal, opening up a more predictable pricing experience while still ensuring subgraphs are sufficiently indexed. + +### How do I switch the subgraph API in the front-end? + +After at least one Indexer has fully indexed a subgraph, a user can query the decentralized network. + +In order to retrieve the query URL for your subgraph, you can copy/paste it by clicking on the symbol next to the query URL. 
You will see something like this: + +`https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` + +Simply replace [api-key] with an API key generated in the Subgraph Studio [API Key section](https://thegraph.com/studio/apikeys/). + +### How much do queries cost? + +The average query cost within the network varies. For the month of September 2022, the average price per query fee cost ranged from $0.00012 - $0.00020. + +### How can I find out how much volume my subgraph has and how much it will cost? + +Hosted service volume data is not public. Please reach out to get volume and cost estimates [here](mailto:migration@thegraph.foundation). + +### How does the gateway work? Is it fully decentralized? + +The gateway process queries so Indexers can serve dapps. The gateways are in an intermediate phase that is being progressively decentralized. More on this soon. + +## Using The Network FAQs + +### Is there a cost to update my subgraph? + +Yes, it is 1% of curation signaled. The 1% is split evenly between Curators (0.5%) and subgraph developers (0.5%). So, for every 10K GRT signaled, it costs subgraph developers 50 GRT to update. + +### How do I speed up sync time? + +Minimize the use of smart contract calls within the subgraph. Accessing a smart contract state requires an eth_call to the RPC, which slows down sync times. + +### Is there multisig support in Subgraph Studio as I migrate? + +Yes, multisig support has recently been added. You can find more information [here](https://thegraph.com/docs/studio/multisig). + +### What are the contract addresses for GRT on Ethereum and Arbitrum? + +- Ethereum: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` +- Ethereum Goerli: `0x5c946740441C12510a167B447B7dE565C20b9E3C` +- Arbitrum: `0x9623063377AD1B27544C965cCd7342f7EA7e88C7` +- Arbitrum Goerli: `0x18c924bd5e8b83b47efadd632b7178e2fd36073d` + +### How much GRT do projects usually keep in their API Key? 
+ +Many projects keep 30-60 days worth of GRT in their API key, so they don't need to refill often. To understand what your 30-60 day GRT fees would be, please reach out [here](mailto:migration@thegraph.foundation). + +### How are query payments made on the decentralized network?  + +Fees are invoiced weekly and pulled out of a user's API Key, with GRT that is bridged to and sits on Arbitrum. + +### How are API keys used for subgraphs on the decentralized network? + +API Keys empower users to have a say in both the max query prices they pay and to prioritize factors like price, economic freshness, and query speed. + +### How does quality of service currently compare between the hosted service and the decentralized network? + +The hosted service and decentralized network have about the same median latency, but the decentralized network tends to have higher latency at higher percentiles. 200 rates for queries are generally similar, with both > 99.9%. As a result of its decentralization, the network has not had a broad outage across subgraphs, whereas the hosted service does on rare occasions have temporary outages as a result of its centralized nature. + +### What if my question isn't answered by these FAQs? + +Please reach out to [migration@thegraph.foundation](mailto:migration@thegraph.foundation) for any additional assistance. diff --git a/website/pages/cs/network/_meta.js b/website/pages/cs/network/_meta.js new file mode 100644 index 000000000000..49858537c885 --- /dev/null +++ b/website/pages/cs/network/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/network/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/network/benefits.mdx b/website/pages/cs/network/benefits.mdx new file mode 100644 index 000000000000..839a0a7b9cf7 --- /dev/null +++ b/website/pages/cs/network/benefits.mdx @@ -0,0 +1,96 @@ +--- +title: The Graph Network vs. 
Self Hosting +socialImage: https://thegraph.com/docs/img/seo/benefits.jpg +--- + +The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. + +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. + +Here is an analysis: + +## Why You Should Use The Graph Network + +- 60-98% lower monthly cost +- $0 infrastructure setup costs +- Superior uptime +- Access to 438 Indexers (and counting) +- 24/7 technical support by global community + +## The Benefits Explained + +### Lower & more Flexible Cost Structure + +No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. + +Query costs may vary; the quoted cost is the average at time of publication (December 2022). 
+ +## Low Volume User (less than 30,000 queries per month) + +| Cost Comparison | Self Hosted | Graph Network | +| :-: | :-: | :-: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | ~$15 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | +| Cost per query | $0 | $0.0005 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | ~$15 | + +## Medium Volume User (3,000,000+ queries per month) + +| Cost Comparison | Self Hosted | Graph Network | +| :-: | :-: | :-: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $750 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 3,000,000+ | +| Cost per query | $0 | $0.00025 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $750 | + +## High Volume User (30,000,000+ queries per month) + +| Cost Comparison | Self Hosted | Graph Network | +| :-: | :-: | :-: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $4,500 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 30,000,000+ | +| Cost per query | $0 | $0.00015 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total 
Monthly Costs | $11,000+ | $4,500 | + +\*including costs for backup: $50-$100 per month + +Engineering time based on $200 per hour assumption + +using the max query budget function in the budget billing tab, while maintaining high quality of service + +Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. + +Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). + +Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. + +Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. + +## No Setup Costs & Greater Operational Efficiency + +Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. + +## Reliability & Resiliency + +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. + +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. + +Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). 
diff --git a/website/pages/cs/network/curating.mdx b/website/pages/cs/network/curating.mdx new file mode 100644 index 000000000000..797d9b9dd896 --- /dev/null +++ b/website/pages/cs/network/curating.mdx @@ -0,0 +1,96 @@ +--- +title: Curating +--- + +Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs. + +When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. + +Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust. Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) + +## Bonding Curve 101 + +First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. 
+ +![Price per shares](/img/price-per-share.png) + +As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: + +![Bonding curve](/img/bonding-curve.png) + +Consider we have two curators that mint shares for a subgraph: + +- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. +- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. +- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. +- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. +- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. +- **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. Signaling early means you put in less GRT for each share. By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. + +In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** + +In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. + +## How to Signal + +Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. 
Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) + +A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. + +Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. + +Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. + +> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. + +## What does Signaling mean for The Graph Network? + +For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. 
+ +And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. + +Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! + +![Signaling diagram](/img/curator-signaling.png) + +Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). + +![Explorer subgraphs](/img/explorer-subgraphs.png) + +## Risks + +1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. +2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. +3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. 
This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). +4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. + - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. + - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. + +## Curation FAQs + +### 1. What % of query fees do Curators earn? + +By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. + +### 2. How do I decide which subgraphs are high quality to signal on? + +Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. 
Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half the curation tax, i.e. 0.5%, because updating subgraphs is an on-chain action that costs gas.
By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The number of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process.
+One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who is not trustworthy, or is not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT.
+ ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day + unbonding period._ +
+ +### Choosing a trustworthy Indexer with a fair reward payout for Delegators + +This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters. + +Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards. + +
+ ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The + middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* +
+ +- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. + +As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. + +### Calculating Delegators expected return + +A Delegator has to consider a lot of factors when determining the return. These include: + +- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. +- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. + +### Considering the query fee cut and indexing fee cut + +As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. 
After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: + +![Delegation Image 3](/img/Delegation-Reward-Formula.png) + +### Considering the Indexer's delegation pool + +Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: + +![Share formula](/img/Share-Forumla.png) + +Using this formula, we can see that it is actually possible for an Indexer who is offering only 20% to Delegators, to actually be giving Delegators an even better reward than an Indexer who is giving 90% to Delegators. + +A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. + +### Considering the delegation capacity + +Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. + +Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. And all the Delegators, and the Indexer, are earning way less rewards than they could be. + +Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. + +## Delegator FAQs and Bugs + +### MetaMask "Pending Transaction" Bug + +**When I try to delegate my transaction in MetaMask appears as "Pending" or "Queued" for longer than expected. 
At times, attempts to delegate to indexers via MetaMask can fail and result in prolonged periods of "Pending" or "Queued" transaction attempts. For example, a user may attempt to delegate with an insufficient gas fee relative to the current prices, resulting in the transaction attempt displaying as "Pending" in their MetaMask wallet for 15+ minutes. When this occurs, subsequent transactions can be attempted by a user, but these will not be processed until the initial transaction is mined, as transactions for an address must be processed in order. In such cases, these transactions can be cancelled in MetaMask, but the transaction attempts will accrue gas fees without any guarantee that subsequent attempts will be successful. A simpler resolution to this bug is restarting the browser (e.g., using "abort:restart" in the address bar), which will cancel all previous attempts without gas being subtracted from the wallet. Several users who have encountered this issue have reported successful transactions after restarting their browser and attempting to delegate.
Developers can use the same local setup whether they are building for The Graph Network, the hosted service or a local Graph Node, leveraging `graph-cli` and `graph-ts` to build their subgraph. Developers are encouraged to use tools such as [Matchstick](https://github.com/LimeChain/matchstick) for unit testing to improve the robustness of their subgraphs. + +> There are certain constraints on The Graph Network, in terms of feature and network support. Only subgraphs on [supported networks](/developing/supported-networks) will earn indexing rewards, and subgraphs which fetch data from IPFS are also not eligible. + +### Deploy to the Subgraph Studio + +Once defined, the subgraph can be built and deployed to the [Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/). The Subgraph Studio is a sandbox environment which will index the deployed subgraph and make it available for rate-limited development and testing. This gives developers an opportunity to verify that their subgraph does not encounter any indexing errors, and works as expected. + +### Publish to the Network + +When the developer is happy with their subgraph, they can publish it to The Graph Network. This is an on-chain action, which registers the subgraph so that it is discoverable by Indexers. Published subgraphs have a corresponding NFT, which is then easily transferable. The published subgraph has associated metadata, which provides other network participants with useful context and information. + +### Signal to Encourage Indexing + +Published subgraphs are unlikely to be picked up by Indexers without the addition of signal. Signal is locked GRT associated with a given subgraph, which indicates to Indexers that a given subgraph will receive query volume, and also contributes to the indexing rewards available for processing it. Subgraph developers will generally add signal to their subgraph, in order to encourage indexing. 
Third party Curators may also signal on a given subgraph, if they deem the subgraph likely to drive query volume. + +### Querying & Application Development + +Once a subgraph has been processed by Indexers and is available for querying, developers can start to use the subgraph in their applications. Developers query subgraphs via a gateway, which forwards their queries to an Indexer who has processed the subgraph, paying query fees in GRT. + +In order to make queries, developers must generate an API key, which can be done in the Subgraph Studio. This API key must be funded with GRT, in order to pay query fees. Developers can set a maximum query fee, in order to control their costs, and limit their API key to a given subgraph or origin domain. The Subgraph Studio provides developers with data on their API key usage over time. + +Developers are also able to express an Indexer preference to the gateway, for example preferring Indexers whose query response is faster, or whose data is most up to date. These controls are set in the Subgraph Studio. + +### Updating Subgraphs + +After a time a subgraph developer may want to update their subgraph, perhaps fixing a bug or adding new functionality. The subgraph developer may deploy new version(s) of their subgraph to the Subgraph Studio for rate-limited development and testing. + +Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. + +### Deprecating Subgraphs + +At some point a developer may decide that they no longer need a published subgraph. At that point they may deprecate the subgraph, which returns any signalled GRT to the Curators. 
+ +### Diverse Developer Roles + +Some developers will engage with the full subgraph lifecycle on the network, publishing, querying and iterating on their own subgraphs. Some may be focused on subgraph development, building open APIs which others can build on. Some may be application focused, querying subgraphs deployed by others. + +### Developers and Network Economics + +Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. diff --git a/website/pages/cs/network/explorer.mdx b/website/pages/cs/network/explorer.mdx new file mode 100644 index 000000000000..059f6fdf5fa5 --- /dev/null +++ b/website/pages/cs/network/explorer.mdx @@ -0,0 +1,203 @@ +--- +title: Graph Explorer +--- + +Welcome to the Graph Explorer, or as we like to call it, your decentralized portal into the world of subgraphs and network data. 👩🏽‍🚀 The Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. For a general overview of the Graph Explorer, check out the video below (or keep reading below): + + + +## Subgraphs + +First things first, if you just finished deploying and publishing your subgraph in the Subgraph Studio, the Subgraphs tab on the top of the navigation bar is the place to view your own finished subgraphs (and the subgraphs of others) on the decentralized network. Here, you’ll be able to find the exact subgraph you’re looking for based on the date created, signal amount, or name. + +![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png) + +When you click into a subgraph, you’ll be able to test queries in the playground and be able to leverage network details to make informed decisions. 
You’ll also be able to signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. This is critical because signaling on a subgraph incentivizes it to be indexed, which means that it’ll surface on the network to eventually serve queries. + +![Explorer Image 2](/img/Subgraph-Details.png) + +On each subgraph’s dedicated page, several details are surfaced. These include: + +- Signal/Un-signal on subgraphs +- View more details such as charts, current deployment ID, and other metadata +- Switch versions to explore past iterations of the subgraph +- Query subgraphs via GraphQL +- Test subgraphs in the playground +- View the Indexers that are indexing on a certain subgraph +- Subgraph stats (allocations, Curators, etc) +- View the entity who published the subgraph + +![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) + +## Participants + +Within this tab, you’ll get a bird’s eye view of all the people that are participating in the network activities, such as Indexers, Delegators, and Curators. Below, we’ll go into an in-depth review of what each tab means for you. + +### 1. Indexers + +![Explorer Image 4](/img/Indexer-Pane.png) + +Let’s start with the Indexers. Indexers are the backbone of the protocol, being the ones that stake on subgraphs, index them, and serve queries to anyone consuming subgraphs. In the Indexers table, you’ll be able to see an Indexers’ delegation parameters, their stake, how much they have staked to each subgraph, and how much revenue they have made off of query fees and indexing rewards. Deep dives below: + +- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators +- Effective Reward Cut - the indexing reward cut applied to the delegation pool. If it’s negative, it means that the Indexer is giving away part of their rewards. 
If it’s positive, it means that the Indexer is keeping some of their rewards +- Cooldown Remaining - the time remaining until the Indexer can change the above delegation parameters. Cooldown periods are set up by Indexers when they update their delegation parameters +- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior +- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed +- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing +- Available Delegation Capacity - the amount of delegated stake the Indexers can still receive before they become over-delegated +- Max Delegation Capacity - the maximum amount of delegated stake the Indexer can productively accept. Excess delegated stake cannot be used for allocations or reward calculations.
Once a Curator has found a potentially attractive subgraph, they can curate it by signaling on its bonding curve. In doing so, Curators let Indexers know which subgraphs are high quality and should be indexed. + +Curators can be community members, data consumers, or even subgraph developers who signal on their own subgraphs by depositing GRT tokens into a bonding curve. By depositing GRT, Curators mint curation shares of a subgraph. As a result, Curators are eligible to earn a portion of the query fees that the subgraph they have signaled on generates. The bonding curve incentivizes Curators to curate the highest quality data sources. The Curator table in this section will allow you to see: + +- The date the Curator started curating +- The number of GRT that was deposited +- The number of shares a Curator owns + +![Explorer Image 6](/img/Curation-Overview.png) + +If you want to learn more about the Curator role, you can do so by visiting the following links of [The Graph Academy](https://thegraph.academy/curators/) or [official documentation.](/network/curating) + +### 3. Delegators + +Delegators play a key role in maintaining the security and decentralization of The Graph Network. They participate in the network by delegating (i.e., “staking”) GRT tokens to one or multiple indexers. Without Delegators, Indexers are less likely to earn significant rewards and fees. Therefore, Indexers seek to attract Delegators by offering them a portion of the indexing rewards and query fees that they earn. + +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://thegraph.com/discord) or [The Graph Forum](https://forum.thegraph.com/)! 
+ +![Explorer Image 7](/img/Delegation-Overview.png) + +The Delegators table will allow you to see the active Delegators in the community, as well as metrics such as: + +- The number of Indexers a Delegator is delegating towards +- A Delegator’s original delegation +- The rewards they have accumulated but have not withdrawn from the protocol +- The realized rewards they withdrew from the protocol +- Total amount of GRT they have currently in the protocol +- The date they last delegated at + +If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). + +## Network + +In the Network section, you will see global KPIs as well as the ability to switch to a per-epoch basis and analyze network metrics in more detail. These details will give you a sense of how the network is performing over time. + +### Activity + +The activity section has all the current network metrics as well as some cumulative metrics over time. Here you can see things like: + +- The current total network stake +- The stake split between the Indexers and their Delegators +- Total supply, minted, and burned GRT since the network inception +- Total Indexing rewards since the inception of the protocol +- Protocol parameters such as curation reward, inflation rate, and more +- Current epoch rewards and fees + +A few key details that are worth mentioning: + +- **Query fees represent the fees generated by the consumers**, and they can be claimed (or not) by the Indexers after a period of at least 7 epochs (see below) after their allocations towards the subgraphs have been closed and the data they served has been validated by the consumers. 
+- **Indexing rewards represent the amount of rewards the Indexers claimed from the network issuance during the epoch.** Although the protocol issuance is fixed, the rewards only get minted once the Indexers close their allocations towards the subgraphs they’ve been indexing. Thus the per-epoch number of rewards varies (ie. during some epochs, Indexers might’ve collectively closed allocations that have been open for many days). + +![Explorer Image 8](/img/Network-Stats.png) + +### Epochs + +In the Epochs section, you can analyze on a per-epoch basis, metrics such as: + +- Epoch start or end block +- Query fees generated and indexing rewards collected during a specific epoch +- Epoch status, which refers to the query fee collection and distribution and can have different states: + - The active epoch is the one in which Indexers are currently allocating stake and collecting query fees + - The settling epochs are the ones in which the state channels are being settled. This means that the Indexers are subject to slashing if the consumers open disputes against them. + - The distributing epochs are the epochs in which the state channels for the epochs are being settled and Indexers can claim their query fee rebates. + - The finalized epochs are the epochs that have no query fee rebates left to claim by the Indexers, thus being finalized. + +![Explorer Image 9](/img/Epoch-Stats.png) + +## Your User Profile + +Now that we’ve talked about the network stats, let’s move on to your personal profile. Your personal profile is the place for you to see your network activity, no matter how you’re participating on the network. Your crypto wallet will act as your user profile, and with the User Dashboard, you’ll be able to see: + +### Profile Overview + +This is where you can see any current actions you took. This is also where you can find your profile information, description, and website (if you added one). 
+ +![Explorer Image 10](/img/Profile-Overview.png) + +### Subgraphs Tab + +If you click into the Subgraphs tab, you’ll see your published subgraphs. This will not include any subgraphs deployed with the CLI for testing purposes – subgraphs will only show up when they are published to the decentralized network. + +![Explorer Image 11](/img/Subgraphs-Overview.png) + +### Indexing Tab + +If you click into the Indexing tab, you’ll find a table with all the active and historical allocations towards the subgraphs, as well as charts that you can analyze and see your past performance as an Indexer. + +This section will also include details about your net Indexer rewards and net query fees. You’ll see the following metrics: + +- Delegated Stake - the stake from Delegators that can be allocated by you but cannot be slashed +- Total Query Fees - the total fees that users have paid for queries served by you over time +- Indexer Rewards - the total amount of Indexer rewards you have received, in GRT +- Fee Cut - the % of query fee rebates that you will keep when you split with Delegators +- Rewards Cut - the % of Indexer rewards that you will keep when splitting with Delegators +- Owned - your deposited stake, which could be slashed for malicious or incorrect behavior + +![Explorer Image 12](/img/Indexer-Stats.png) + +### Delegating Tab + +Delegators are important to the Graph Network. A Delegator must use their knowledge to choose an Indexer that will provide a healthy return on rewards. Here you can find details of your active and historical delegations, along with the metrics of the Indexers that you delegated towards. + +In the first half of the page, you can see your delegation chart, as well as the rewards-only chart. To the left, you can see the KPIs that reflect your current delegation metrics. 
+ +The Delegator metrics you’ll see here in this tab include: + +- Total delegation rewards +- Total unrealized rewards +- Total realized rewards + +In the second half of the page, you have the delegations table. Here you can see the Indexers that you delegated towards, as well as their details (such as rewards cuts, cooldown, etc). + +With the buttons on the right side of the table, you can manage your delegation - delegate more, undelegate, or withdraw your delegation after the thawing period. + +Keep in mind that this chart is horizontally scrollable, so if you scroll all the way to the right, you can also see the status of your delegation (delegating, undelegating, withdrawable). + +![Explorer Image 13](/img/Delegation-Stats.png) + +### Curating Tab + +In the Curation tab, you’ll find all the subgraphs you’re signaling on (thus enabling you to receive query fees). Signaling allows Curators to highlight to Indexers which subgraphs are valuable and trustworthy, thus signaling that they need to be indexed on. + +Within this tab, you’ll find an overview of: + +- All the subgraphs you're curating on with signal details +- Share totals per subgraph +- Query rewards per subgraph +- Updated at date details + +![Explorer Image 14](/img/Curation-Stats.png) + +## Your Profile Settings + +Within your user profile, you’ll be able to manage your personal profile details (like setting up an ENS name). If you’re an Indexer, you have even more access to settings at your fingertips. In your user profile, you’ll be able to set up your delegation parameters and operators. + +- Operators take limited actions in the protocol on the Indexer's behalf, such as opening and closing allocations. Operators are typically other Ethereum addresses, separate from their staking wallet, with gated access to the network that Indexers can personally set +- Delegation parameters allow you to control the distribution of GRT between you and your Delegators. 
+ +![Explorer Image 15](/img/Profile-Settings.png) + +As your official portal into the world of decentralized data, The Graph Explorer allows you to take a variety of actions, no matter your role in the network. You can get to your profile settings by opening the dropdown menu next to your address, then clicking on the Settings button. + +
![Wallet details](/img/Wallet-Details.png)
diff --git a/website/pages/cs/network/indexing.mdx b/website/pages/cs/network/indexing.mdx new file mode 100644 index 000000000000..1068043bcafa --- /dev/null +++ b/website/pages/cs/network/indexing.mdx @@ -0,0 +1,805 @@ +--- +title: Indexing +--- + +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. + +GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. + +Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. + + + +## FAQ + +### What is the minimum stake required to be an Indexer on the network? + +The minimum stake for an Indexer is currently set to 100K GRT. + +### What are the revenue streams for an Indexer? + +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. + +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. + +### How are indexing rewards distributed? 
+ +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** + +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/vtvv7FP). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. + +### What is a proof of indexing (POI)? + +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. + +### When are indexing rewards distributed? + +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). + +### Can pending indexing rewards be monitored? 
+ +The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. + +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: + +1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: + +```graphql +query indexerAllocations { + indexer(id: "") { + allocations { + activeForIndexer { + allocations { + id + } + } + } + } +} +``` + +Use Etherscan to call `getRewards()`: + +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) + +* To call `getRewards()`: + - Expand the **10. getRewards** dropdown. + - Enter the **allocationID** in the input. + - Click the **Query** button. + +### What are disputes and where can I view them? + +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. + +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. + +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. 
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. + +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. + +### What are query fee rebates and when are they distributed? + +Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. + +Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. + +### What is query fee cut and indexing reward cut? + +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. + +- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. 
+ +- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. + +### How do Indexers know which subgraphs to index? + +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: + +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. + +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. + +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. + +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. + +### What are the hardware requirements? + +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. 
+ +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| --- | :-: | :-: | :-: | :-: | :-: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | + +### What are some basic security precautions an Indexer should take? + +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/network/indexing#stake-in-the-protocol) for instructions. + +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. + +## Infrastructure + +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. + +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. + +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. 
It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. + +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. + +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. + +- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. + +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. + +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. + +### Ports overview + +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. + +#### Graph Node + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| --- | --- | --- | --- | --- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | +| 8040 | Prometheus metrics | /metrics | --metrics-port | - | + +#### Indexer Service + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| --- | --- | --- | --- | --- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | --metrics-port | - | + +#### Indexer Agent + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | + +### Setup server infrastructure using Terraform on Google Cloud + +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. + +#### Install prerequisites + +- Google Cloud SDK +- Kubectl command line tool +- Terraform + +#### Create a Google Cloud Project + +- Clone or navigate to the Indexer repository. + +- Navigate to the ./terraform directory, this is where all commands should be executed. + +```sh +cd terraform +``` + +- Authenticate with Google Cloud and create a new project. + +```sh +gcloud auth login +project= +gcloud projects create --enable-cloud-apis $project +``` + +- Use the Google Cloud Console's billing page to enable billing for the new project. + +- Create a Google Cloud configuration. + +```sh +proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") +gcloud config configurations create $project +gcloud config set project "$proj_id" +gcloud config set compute/region us-central1 +gcloud config set compute/zone us-central1-a +``` + +- Enable required Google Cloud APIs. + +```sh +gcloud services enable compute.googleapis.com +gcloud services enable container.googleapis.com +gcloud services enable servicenetworking.googleapis.com +gcloud services enable sqladmin.googleapis.com +``` + +- Create a service account. 
+ +```sh +svc_name= +gcloud iam service-accounts create $svc_name \ + --description="Service account for Terraform" \ + --display-name="$svc_name" +gcloud iam service-accounts list +# Get the email of the service account from the list +svc=$(gcloud iam service-accounts list --format='get(email)' +--filter="displayName=$svc_name") +gcloud iam service-accounts keys create .gcloud-credentials.json \ + --iam-account="$svc" +gcloud projects add-iam-policy-binding $proj_id \ + --member serviceAccount:$svc \ + --role roles/editor +``` + +- Enable peering between database and Kubernetes cluster that will be created in the next step. + +```sh +gcloud compute addresses create google-managed-services-default \ + --prefix-length=20 \ + --purpose=VPC_PEERING \ + --network default \ + --global \ + --description 'IP Range for peer networks.' +gcloud services vpc-peerings connect \ + --network=default \ + --ranges=google-managed-services-default +``` + +- Create minimal terraform configuration file (update as needed). + +```sh +indexer= +cat > terraform.tfvars < \ + -f Dockerfile.indexer-service \ + -t indexer-service:latest \ +# Indexer agent +docker build \ + --build-arg NPM_TOKEN= \ + -f Dockerfile.indexer-agent \ + -t indexer-agent:latest \ +``` + +- Run the components + +```sh +docker run -p 7600:7600 -it indexer-service:latest ... +docker run -p 18000:8000 -it indexer-agent:latest ... +``` + +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). 
+ +#### Using K8s and Terraform + +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section + +#### Usage + +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). + +#### Indexer agent + +```sh +graph-indexer-agent start \ + --ethereum \ + --ethereum-network mainnet \ + --mnemonic \ + --indexer-address \ + --graph-node-query-endpoint http://localhost:8000/ \ + --graph-node-status-endpoint http://localhost:8030/graphql \ + --graph-node-admin-endpoint http://localhost:8020/ \ + --public-indexer-url http://localhost:7600/ \ + --indexer-geo-coordinates \ + --index-node-ids default \ + --indexer-management-port 18000 \ + --metrics-port 7040 \ + --network-subgraph-endpoint https://gateway.network.thegraph.com/network \ + --default-allocation-amount 100 \ + --register true \ + --inject-dai true \ + --postgres-host localhost \ + --postgres-port 5432 \ + --postgres-username \ + --postgres-password \ + --postgres-database indexer \ + --allocation-management auto \ + | pino-pretty +``` + +#### Indexer service + +```sh +SERVER_HOST=localhost \ +SERVER_PORT=5432 \ +SERVER_DB_NAME=is_staging \ +SERVER_DB_USER= \ +SERVER_DB_PASSWORD= \ +graph-indexer-service start \ + --ethereum \ + --ethereum-network mainnet \ + --mnemonic \ + --indexer-address \ + --port 7600 \ + --metrics-port 7300 \ + --graph-node-query-endpoint http://localhost:8000/ \ + --graph-node-status-endpoint http://localhost:8030/graphql \ + --postgres-host localhost \ + --postgres-port 5432 \ + --postgres-username \ + --postgres-password \ + --postgres-database is_staging \ + --network-subgraph-endpoint https://gateway.network.thegraph.com/network \ + | pino-pretty +``` + +#### Indexer CLI + +The Indexer CLI is a plugin for 
[`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. + +```sh +graph indexer connect http://localhost:18000 +graph indexer status +``` + +#### Indexer management using Indexer CLI + +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. + +#### Usage + +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. + +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) + +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. 
+ +- `graph indexer rules set [options] ...` - Set one or more indexing rules. + +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. + +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. + +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. + +- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additonal argument `--status` can be used to print out all actions of a certain status. + +- `graph indexer action queue allocate ` - Queue allocation action + +- `graph indexer action queue reallocate ` - Queue reallocate action + +- `graph indexer action queue unallocate ` - Queue unallocate action + +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator + +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution + +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately + +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. + +#### Indexing rules + +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. 
When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. + +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. + +Data model: + +```graphql +type IndexingRule { + identifier: string + identifierType: IdentifierType + decisionBasis: IndexingDecisionBasis! + allocationAmount: number | null + allocationLifetime: number | null + autoRenewal: boolean + parallelAllocations: number | null + maxAllocationPercentage: number | null + minSignal: string | null + maxSignal: string | null + minStake: string | null + minAverageQueryFees: string | null + custom: string | null + requireSupported: boolean | null + } + +IdentifierType { + deployment + subgraph + group +} + +IndexingDecisionBasis { + rules + never + always + offchain +} +``` + +Example usage of indexing rule: + +``` +graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK + +graph indexer rules set QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK decisionBasis always allocationAmount 123321 allocationLifetime 14 autoRenewal false requireSupported false + +graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK + +graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK +``` + +#### Actions queue CLI + +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. 
+ +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: + +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
+ +Data model: + +```graphql +Type ActionInput { + status: ActionStatus + type: ActionType + deploymentID: string | null + allocationID: string | null + amount: string | null + poi: string | null + force: boolean | null + source: string + reason: string | null + priority: number | null +} + +ActionStatus { + queued + approved + pending + success + failed + canceled +} + +ActionType { + allocate + unallocate + reallocate + collect +} +``` + +Example usage from source: + +```bash +indexer indexer actions get all + +indexer indexer actions get --status queued + +indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 + +indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 + +indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae + +indexer indexer actions cancel + +indexer indexer actions approve 1 3 5 + +indexer indexer actions execute approve +``` + +Note that supported action types for allocation management have different input requirements: + +- `Allocate` - allocate stake to a specific subgraph deployment + + - required action params: + - deploymentID + - amount + +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere + + - required action params: + - allocationID + - deploymentID + - optional action params: + - poi + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment + + - required action params: + - allocationID + - deploymentID + - amount + - optional action params: + - poi + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + +#### Cost models + +Cost models provide dynamic pricing for queries based on market and query attributes. 
The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. + +#### Agora + +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. + +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. + +Example cost model: + +``` +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; + +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost +default => 0.1 * $SYSTEM_LOAD; +``` + +Example query costing using the above model: + +| Query | Price | +| ---------------------------------------------------------------------------- | ------- | +| { pairs(skip: 5000) { id } } | 0.5 GRT | +| { tokens { symbol } } | 0.1 GRT | +| { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | + +#### Applying the cost model + +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. 
The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. + +```sh +indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' +indexer cost set model my_model.agora +``` + +## Interacting with the network + +### Stake in the protocol + +The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. _ **Note**: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools)._ + +Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. + +#### Approve tokens + +1. Open the [Remix app](https://remix.ethereum.org/) in a browser + +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). + +3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. + +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. + +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. + +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). + +#### Stake tokens + +1. Open the [Remix app](https://remix.ethereum.org/) in a browser + +2. 
In the `File Explorer` create a file named **Staking.abi** with the staking ABI. + +3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. + +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. + +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. + +6. Call `stake()` to stake GRT in the protocol. + +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. + +8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set the `cooldownBlocks` period to 500 blocks. + +``` +setDelegationParameters(950000, 600000, 500) +``` + +### The life of an allocation + +After being created by an Indexer a healthy allocation goes through four states. + +- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. + +- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). + +- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and its query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **--allocation-claim-threshold**. + +- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing indeterministically. 
diff --git a/website/pages/cs/network/overview.mdx b/website/pages/cs/network/overview.mdx new file mode 100644 index 000000000000..bee546908372 --- /dev/null +++ b/website/pages/cs/network/overview.mdx @@ -0,0 +1,15 @@ +--- +title: Network Overview +--- + +The Graph Network is a decentralized indexing protocol for organizing blockchain data. Applications use GraphQL to query open APIs called subgraphs, to retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. + +## Overview + +The Graph Network consists of Indexers, Curators and Delegators that provide services to the network, and serve data to Web3 applications. Consumers use the applications and consume the data. + +![Token Economics](/img/Network-roles@2x.png) + +To ensure economic security of The Graph Network and the integrity of data being queried, participants stake and use Graph Tokens ([GRT](/tokenomics)). GRT is a work utility token that is an ERC-20 used to allocate resources in the network. + +Active Indexers, Curators and Delegators can provide services and earn income from the network, proportional to the amount of work they perform and their GRT stake. 
diff --git a/website/pages/cs/new-chain-integration.mdx b/website/pages/cs/new-chain-integration.mdx new file mode 100644 index 000000000000..94ee68502336 --- /dev/null +++ b/website/pages/cs/new-chain-integration.mdx @@ -0,0 +1,75 @@ +--- +title: Integrating New Networks +--- + +Graph Node can currently index data from the following chain types: + +- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) +- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) +- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) +- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) + +If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. + +If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. + +** 1. EVM JSON-RPC** + +If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). + +**2. Firehose** + +For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/README/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. 
+ +## Difference between EVM JSON-RPC & Firehose + +While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. + +New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. + +> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) + +--- + +## Testing an EVM JSON-RPC + +For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: + +- `eth_getLogs` +- `eth_call` \_(for historical blocks, with EIP-1898 - requires archive node): +- `eth_getBlockByNumber` +- `eth_getBlockByHash` +- `net_version` +- `eth_getTransactionReceipt`, in a JSON-RPC batch request +- _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ + +### Graph Node Configuration + +**Start by preparing your local environment** + +1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL + > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. +3. 
Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ + +**Test the integration by locally deploying a subgraph** + +1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) +2. Create a simple example subgraph. Some options are below: + 1. The pre-packed [Gravatar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point + 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) +3. Adapt the resulting `subgraph.yaml` by changing [`dataSources.network`](http://dataSources.network) to the same name previously passed on to Graph Node. +4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` + +Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. + +--- + +## Integrating a new Firehose-enabled chain + +Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: + +1. [General docs on Firehose](firehose/) +2. [Adding Firehose support for a new chain](firehose/integrate-new-chains/new-blockchains/) +3. 
[Integrating Graph Node with a new chain via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) diff --git a/website/pages/cs/operating-graph-node.mdx b/website/pages/cs/operating-graph-node.mdx new file mode 100644 index 000000000000..832b6cccf347 --- /dev/null +++ b/website/pages/cs/operating-graph-node.mdx @@ -0,0 +1,345 @@ +--- +title: Operating Graph Node +--- + +Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. + +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). + +## Graph Node + +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. + +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). + +### PostgreSQL database + +The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. + +### Network clients + +In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. 
This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. + +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). + +**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). + +### IPFS Nodes + +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. + +### Prometheus metrics server + +To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. + +### Getting started from source + +#### Install prerequisites + +- **Rust** + +- **PostgreSQL** + +- **IPFS** + +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. + +```sh +sudo apt-get install -y clang libpq-dev libssl-dev pkg-config +``` + +#### Setup + +1. Start a PostgreSQL database server + +```sh +initdb -D .postgres +pg_ctl -D .postgres -l logfile start +createdb graph-node +``` + +2. 
Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` + +3. Now that all the dependencies are setup, start the Graph Node: + +```sh +cargo run -p graph-node --release -- \ + --postgres-url postgresql://[USERNAME]:[PASSWORD]@localhost:5432/graph-node \ + --ethereum-rpc [NETWORK_NAME]:[URL] \ + --ipfs https://ipfs.network.thegraph.com +``` + +### Getting started with Kubernetes + +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). + +### Ports + +When it is running Graph Node exposes the following ports: + +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| --- | --- | --- | --- | --- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | +| 8040 | Prometheus metrics | /metrics | --metrics-port | - | + +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint. + +## Advanced Graph Node configuration + +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. + +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. + +### `config.toml` + +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. + +> When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. + +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: + +```toml +[store] +[store.primary] +connection="<.. postgres-url argument ..>" +[deployment] +[[deployment.rule]] +indexers = [ "<.. list of all indexing nodes ..>" ] +``` + +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). + +#### Multiple Graph Nodes + +Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. 
This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestor), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). + +> Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. + +#### Deployment rules + +Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. + +Example deployment rule configuration: + +```toml +[deployment] +[[deployment.rule]] +match = { name = "(vip|important)/.*" } +shard = "vip" +indexers = [ "index_node_vip_0", "index_node_vip_1" ] +[[deployment.rule]] +match = { network = "kovan" } +# No shard, so we use the default shard called 'primary' +indexers = [ "index_node_kovan_0" ] +[[deployment.rule]] +match = { network = [ "xdai", "poa-core" ] } +indexers = [ "index_node_other_0" ] +[[deployment.rule]] +# There's no 'match', so any subgraph matches +shards = [ "sharda", "shardb" ] +indexers = [ + "index_node_community_0", + "index_node_community_1", + "index_node_community_2", + "index_node_community_3", + "index_node_community_4", + "index_node_community_5" + ] +``` + +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). 
+ +#### Dedicated query nodes + +Nodes can be configured to explicitly be query nodes by including the following in the configuration file: + +```toml +[general] +query = "" +``` + +Any node whose --node-id matches the regular expression will be set up to only respond to queries. + +#### Database scaling via sharding + +For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. + +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. + +Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. + +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. + +In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. 
Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. + +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). + +#### Dedicated block ingestion + +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: + +```toml +[chains] +ingestor = "block_ingestor_node" +``` + +#### Supporting multiple networks + +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: + +- Multiple networks +- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). +- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) + +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. 
The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. + +```toml +[chains] +ingestor = "block_ingestor_node" +[chains.mainnet] +shard = "vip" +provider = [ + { label = "mainnet1", url = "http://..", features = [], headers = { Authorization = "Bearer foo" } }, + { label = "mainnet2", url = "http://..", features = [ "archive", "traces" ] } +] +[chains.kovan] +shard = "primary" +provider = [ { label = "kovan", url = "http://..", features = [] } ] +``` + +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). + +### Environment variables + +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). + +### Continuous deployment + +Users who are operating a scaled indexing setup with advanced configuration may benefit from managing their Graph Nodes with Kubernetes. + +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. + +### Managing Graph Node + +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. + +#### Logging + +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. 
Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. + +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). + +#### Monitoring & alerting + +Graph Node provides metrics via a Prometheus endpoint on port 8040 by default. Grafana can then be used to visualise these metrics. + +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). + +#### Graphman + +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. + +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. + +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs`. + +### Working with subgraphs + +#### Indexing status API + +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. + +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). + +#### Indexing performance + +There are three separate parts of the indexing process: + +- Fetching events of interest from the provider +- Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) +- Writing the resulting data to the store + +These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. + +Common causes of indexing slowness: + +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers +- A large amount of store interaction during execution +- A large amount of data to save to the store +- A large number of events to process +- Slow database connection time, for crowded nodes +- The provider itself falling behind the chain head +- Slowness in fetching new receipts at the chain head from the provider + +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. + +#### Failed subgraphs + +During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: + +- Deterministic failures: these are failures which will not be resolved with retries +- Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. + +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. 
+ +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. + +#### Block and call cache + +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. + +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. + +If a block cache inconsistency is suspected, such as a tx receipt missing event: + +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. + 2. If the block matches the provider, then the issue can be debugged directly against the provider. + +#### Querying issues and errors + +Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. 
+ +However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. + +There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries. + +##### Query caching + +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). + +##### Analysing queries + +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. + +In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. + +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. + +Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. 
+ +##### Account-like optimisation + +Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions). + +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. + +The command `graphman stats show` shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. + +In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. + +Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity` in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. + +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. + +#### Removing subgraphs + +> This is new functionality, which will be available in Graph Node 0.29.x + +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all its indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). diff --git a/website/pages/cs/publishing/_meta.js b/website/pages/cs/publishing/_meta.js new file mode 100644 index 000000000000..eb06f56f912a --- /dev/null +++ b/website/pages/cs/publishing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/publishing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/publishing/publishing-a-subgraph.mdx b/website/pages/cs/publishing/publishing-a-subgraph.mdx new file mode 100644 index 000000000000..1d284dc63af8 --- /dev/null +++ b/website/pages/cs/publishing/publishing-a-subgraph.mdx @@ -0,0 +1,33 @@ +--- +title: Publishing a Subgraph to the Decentralized Network +--- + +Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. 
+ +Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. + +For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + +You can find the list of the supported networks [here](/developing/supported-networks). + +## Publishing a subgraph + +Subgraphs can be published to the decentralized network directly from the Subgraph Studio dashboard by clicking on the **Publish** button. Once a subgraph is published, it will be available to view in the [Graph Explorer](https://thegraph.com/explorer/). + +- Subgraphs can be published to Goerli, Arbitrum Goerli, Arbitrum One, or Ethereum mainnet. + +- Regardless of the network the subgraph was published on, it can index data on any of the [supported networks](/developing/supported-networks). + +- When publishing a new version for an existing subgraph the same rules apply as above. + +## Curating your subgraph + +> It is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +Subgraph Studio enables you to be the first to curate your subgraph by adding GRT to your subgraph's curation pool in the same transaction. When publishing your subgraph, make sure to check the button that says, "Be the first to signal on this subgraph." + +![Curation Pool](/img/curate-own-subgraph-tx.png) + +## Updating metadata for a published subgraph + +Once your subgraph has been published to the decentralized network, you can modify the metadata at any time by making the update in the Subgraph Studio dashboard of the subgraph. After saving the changes and publishing your updates to the network, they will be reflected in The Graph Explorer. This won’t create a new version, as your deployment hasn’t changed. 
diff --git a/website/pages/cs/querying/_meta.js b/website/pages/cs/querying/_meta.js new file mode 100644 index 000000000000..e52da8f399fb --- /dev/null +++ b/website/pages/cs/querying/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/querying/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/querying/distributed-systems.mdx b/website/pages/cs/querying/distributed-systems.mdx new file mode 100644 index 000000000000..85337206bfd3 --- /dev/null +++ b/website/pages/cs/querying/distributed-systems.mdx @@ -0,0 +1,134 @@ +--- +title: Distributed Systems +--- + +The Graph is a protocol implemented as a distributed system. + +Connections fail. Requests arrive out of order. Different computers with out-of-sync clocks and states process related requests. Servers restart. Re-orgs happen between requests. These problems are inherent to all distributed systems but are exacerbated in systems operating at a global scale. + +Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. + +1. Indexer ingests block 8 +2. Request served to the client for block 8 +3. Indexer ingests block 9 +4. Indexer ingests block 10A +5. Request served to the client for block 10A +6. Indexer detects reorg to 10B and rolls back 10A +7. Request served to the client for block 9 +8. Indexer ingests block 10B +9. Indexer ingests block 11 +10. Request served to the client for block 11 + +From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. Along the way, the Indexer serves requests using the latest state it knows about at that time. + +From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. 
When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. + +It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. + +Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases. The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. + +## Polling for updated data + +The Graph provides the `block: { number_gte: $minBlock }` API, which ensures that the response is for a single block equal or higher to `$minBlock`. If the request is made to a `graph-node` instance and the min block is not yet synced, `graph-node` will return an error. If `graph-node` has synced min block, it will run the response for the latest block. If the request is made to an Edge & Node Gateway, the Gateway will filter out any Indexers that have not yet synced min block and make the request for the latest block the Indexer has synced. + +We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: + +```javascript +/// Updates the protocol.paused variable to the latest +/// known value in a loop by fetching it using The Graph. +async function updateProtocolPaused() { + // It's ok to start with minBlock at 0. The query will be served + // using the latest block available. Setting minBlock to 0 is the + // same as leaving out that argument. 
+ let minBlock = 0 + + for (;;) { + // Schedule a promise that will be ready once + // the next Ethereum block will likely be available. + const nextBlock = new Promise((f) => { + setTimeout(f, 14000) + }) + + const query = ` + query GetProtocol($minBlock: Int!) { + protocol(block: { number_gte: $minBlock } id: "0") { + paused + } + _meta { + block { + number + } + } + }` + + const variables = { minBlock } + const response = await graphql(query, variables) + minBlock = response._meta.block.number + + // TODO: Do something with the response data here instead of logging it. + console.log(response.protocol.paused) + + // Sleep to wait for the next block + await nextBlock + } +} +``` + +## Fetching a set of related items + +Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. + +Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. + +```javascript +/// Gets a list of domain names from a single block using pagination +async function getDomainNames() { + // Set a cap on the maximum number of items to pull. + let pages = 5 + const perPage = 1000 + + // The first query will get the first page of results and also get the block + // hash so that the remainder of the queries are consistent with the first. + const listDomainsQuery = ` + query ListDomains($perPage: Int!) { + domains(first: $perPage) { + name + id + } + _meta { + block { + hash + } + } + }` + + let data = await graphql(listDomainsQuery, { perPage }) + let result = data.domains.map((d) => d.name) + let blockHash = data._meta.block.hash + + let query + // Continue fetching additional pages until either we run into the limit of + // 5 pages total (specified above) or we know we have reached the last page + // because the page has fewer entities than a full page. 
+ while (data.domains.length == perPage && --pages) { + let lastID = data.domains[data.domains.length - 1].id + query = ` + query ListDomains($perPage: Int!, $lastID: ID!, $blockHash: Bytes!) { + domains(first: $perPage, where: { id_gt: $lastID }, block: { hash: $blockHash }) { + name + id + } + }` + + data = await graphql(query, { perPage, lastID, blockHash }) + + // Accumulate domain names into the result + for (domain of data.domains) { + result.push(domain.name) + } + } + return result +} +``` + +Note that in case of a re-org, the client will need to retry from the first request to update the block hash to a non-uncle block. diff --git a/website/pages/cs/querying/graphql-api.mdx b/website/pages/cs/querying/graphql-api.mdx new file mode 100644 index 000000000000..89cda460d58f --- /dev/null +++ b/website/pages/cs/querying/graphql-api.mdx @@ -0,0 +1,419 @@ +--- +title: GraphQL API +--- + +This guide explains the GraphQL Query API that is used for the Graph Protocol. + +## Queries + +In your subgraph schema you define types called `Entities`. For each `Entity` type, an `entity` and `entities` field will be generated on the top-level `Query` type. Note that `query` does not need to be included at the top of the `graphql` query when using The Graph. + +### Examples + +Query for a single `Token` entity defined in your schema: + +```graphql +{ + token(id: "1") { + id + owner + } +} +``` + +> **Note:** When querying for a single entity, the `id` field is required, and it must be a string. + +Query all `Token` entities: + +```graphql +{ + tokens { + id + owner + } +} +``` + +### Sorting + +When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. 
+ +#### Example + +```graphql +{ + tokens(orderBy: price, orderDirection: asc) { + id + owner + } +} +``` + +#### Example for nested entity sorting + +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. + +In the following example, we sort the tokens by the name of their owner: + +```graphql +{ + tokens(orderBy: owner__name, orderDirection: asc) { + id + owner { + name + } + } +} +``` + +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. + +### Pagination + +When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. + +Further, the `skip` parameter can be used to skip entities and paginate. e.g. `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. + +Queries should avoid using very large `skip` values since they generally perform poorly. For retrieving a large number of items, it is much better to page through entities based on an attribute as shown in the last example. + +#### Example using `first` + +Query the first 10 tokens: + +```graphql +{ + tokens(first: 10) { + id + owner + } +} +``` + +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. 
+ +#### Example using `first` and `skip` + +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: + +```graphql +{ + tokens(first: 10, skip: 10) { + id + owner + } +} +``` + +#### Example using `first` and `id_gt` + +If a client needs to retrieve a large number of entities, it is much more performant to base queries on an attribute and filter by that attribute. For example, a client would retrieve a large number of tokens using this query: + +```graphql +query manyTokens($lastID: String) { + tokens(first: 1000, where: { id_gt: $lastID }) { + id + owner + } +} +``` + +The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. + +### Filtering + +You can use the `where` parameter in your queries to filter for different properties. You can filter on multiple values within the `where` parameter. + +#### Example using `where` + +Query challenges with `failed` outcome: + +```graphql +{ + challenges(where: { outcome: "failed" }) { + challenger + outcome + application { + id + } + } +} +``` + +You can use suffixes like `_gt`, `_lte` for value comparison: + +#### Example for range filtering + +```graphql +{ + applications(where: { deposit_gt: "10000000000" }) { + id + whitelisted + deposit + } +} +``` + +#### Example for block filtering + +You can also filter entities by the `_change_block(number_gte: Int)` - this filters entities which were updated in or after the specified block. + +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
+ +```graphql +{ + applications(where: { _change_block: { number_gte: 100 } }) { + id + whitelisted + deposit + } +} +``` + +#### Example for nested entity filtering + +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. + +This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. + +```graphql +{ + challenges(where: { application_: { id: 1 } }) { + challenger + outcome + application { + id + } + } +} +``` + +#### Logical operators + +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. + +##### `AND` Operator + +In the following example, we are filtering for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. + +```graphql +{ + challenges(where: { and: [{ number_gte: 100 }, { outcome: "succeeded" }] }) { + challenger + outcome + application { + id + } + } +} +``` + +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. +> +> ```graphql +> { +> challenges(where: { number_gte: 100, outcome: "succeeded" }) { +> challenger +> outcome +> application { +> id +> } +> } +> } +> ``` + +##### `OR` Operator + +In the following example, we are filtering for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. + +```graphql +{ + challenges(where: { or: [{ number_gte: 100 }, { outcome: "succeeded" }] }) { + challenger + outcome + application { + id + } + } +} +``` + +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. 
One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use `and` operators instead of `or` whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. + +#### All Filters + +Full list of parameter suffixes: + +``` +_ +_not +_gt +_lt +_gte +_lte +_in +_not_in +_contains +_contains_nocase +_not_contains +_not_contains_nocase +_starts_with +_starts_with_nocase +_ends_with +_ends_with_nocase +_not_starts_with +_not_starts_with_nocase +_not_ends_with +_not_ends_with_nocase +``` + +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. + +In addition, the following global filters are available as part of `where` argument: + +```graphql +_change_block(number_gte: Int) +``` + +### Time-travel queries + +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. + +The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. + +Note that the current implementation is still subject to certain limitations that might violate these guarantees. 
The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. + +#### Example + +```graphql +{ + challenges(block: { number: 8000000 }) { + challenger + outcome + application { + id + } + } +} +``` + +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. + +#### Example + +```graphql +{ + challenges(block: { hash: "0x5a0b54d5dc17e0aadc383d2db43b0a0d3e029c4c" }) { + challenger + outcome + application { + id + } + } +} +``` + +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. + +### Fulltext Search Queries + +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph. + +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 
+ +Fulltext search operators: + +| Symbol | Operator | Description | +| --- | --- | --- | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| `&#124;` | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required). | + +#### Examples + +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. + +```graphql +{ + blogSearch(text: "anarchism | crumpets") { + id + title + body + author + } +} +``` + +The `follow by` operator specifies words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" + +```graphql +{ + blogSearch(text: "decentralized <-> philosophy") { + id + title + body + author + } +} +``` + +Combine fulltext operators to make more complex filters. With a prefix search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". + +```graphql +{ + blogSearch(text: "lou:* <-> music") { + id + title + body + author + } +} +``` + +### Validation + +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. 
+ +## Schema + +The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). + +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. + +> **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. + +### Entities + +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. + +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. + +### Subgraph Metadata + +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: + +```graphQL +{ + _meta(block: { number: 123987 }) { + block { + number + hash + timestamp + } + deployment + hasIndexingErrors + } +} +``` + +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. + +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
+ +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): + +- hash: the hash of the block +- number: the block number +- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) + +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block diff --git a/website/pages/cs/querying/managing-api-keys.mdx b/website/pages/cs/querying/managing-api-keys.mdx new file mode 100644 index 000000000000..ee7c274bca10 --- /dev/null +++ b/website/pages/cs/querying/managing-api-keys.mdx @@ -0,0 +1,26 @@ +--- +title: Managing your API keys +--- + +Regardless of whether you’re a dapp developer or a subgraph developer, you’ll need to manage your API keys. This is important for you to be able to query subgraphs because API keys make sure the connections between application services are valid and authorized. This includes authenticating the end user and the device using the application. + +The Studio will list out existing API keys, which will give you the ability to manage or delete them. + +1. The **Overview** section will allow you to: + - Edit your key name + - Regenerate API keys + - View the current usage of the API key with stats: + - Number of queries + - Amount of GRT spent +2. Under **Security**, you’ll be able to opt into security settings depending on the level of control you’d like to have over your API keys. In this section, you can: + - View and manage the domain names authorized to use your API key + - Assign subgraphs that can be queried with your API key +3. Under **Indexer Preference**, you’ll be able to set different preferences for Indexers who are indexing subgraphs that your API key is used for. You can assign up to 5 points for each of these: + - **Fastest Speed**: Time between the query and the response from an indexer. If you mark this as important we will optimize for fast indexers. 
+ - **Lowest Price**: The amount paid per query. If you mark this as important we will optimize for the less expensive indexers. + - **Data Freshness**: How recent the latest block an indexer has processed for the subgraph you are querying. If you mark this as important we will optimize to find the indexers with the freshest data. + - **Economic Security**: The amount of GRT an indexer can lose if they respond incorrectly to your query. If you mark this as important we will optimize for indexers with a large stake. +4. Under **Budget**, you’ll be able to update the maximum price per query. Note that we have a dynamic setting for that that's based on a volume discounting algorithm. **We strongly recommend using the default settings unless you are experiencing a specific problem.** Otherwise, you can update it under "Set a custom maximum budget". On this page you can also view different KPIs (in GRT and USD): + - Average cost per query + - Failed queries over max price + - Most expensive query diff --git a/website/pages/cs/querying/querying-best-practices.mdx b/website/pages/cs/querying/querying-best-practices.mdx new file mode 100644 index 000000000000..98c0ffb72c61 --- /dev/null +++ b/website/pages/cs/querying/querying-best-practices.mdx @@ -0,0 +1,463 @@ +--- +title: Querying Best Practices +--- + +The Graph provides a decentralized way to query data from blockchains. + +The Graph network's data is exposed through a GraphQL API, making it easier to query data with the GraphQL language. + +This page will guide you through the essential GraphQL language rules and GraphQL queries best practices. + +--- + +## Querying a GraphQL API + +### The anatomy of a GraphQL query + +Unlike REST API, a GraphQL API is built upon a Schema that defines which queries can be performed. + +For example, a query to get a token using the `token` query will look as follows: + +```graphql +query GetToken($id: ID!) 
{ + token(id: $id) { + id + owner + } +} +``` + +which will return the following predictable JSON response (_when passing the proper `$id` variable value_): + +```json +{ + "token": { + "id": "...", + "owner": "..." + } +} +``` + +GraphQL queries use the GraphQL language, which is defined upon [a specification](https://spec.graphql.org/). + +The above `GetToken` query is composed of multiple language parts (replaced below with `[...]` placeholders): + +```graphql +query [operationName]([variableName]: [variableType]) { + [queryName]([argumentName]: [variableName]) { + # "{ ... }" express a Selection-Set, we are querying fields from `queryName`. + [field] + [field] + } +} +``` + +While the list of syntactic do's and don'ts is long, here are the essential rules to keep in mind when it comes to writing GraphQL queries: + +- Each `queryName` must only be used once per operation. +- Each `field` must be used only once in a selection (we cannot query `id` twice under `token`) +- Some `field`s or queries (like `tokens`) return complex types that require a selection of sub-field. Not providing a selection when expected (or providing one when not expected - for example, on `id`) will raise an error. To know a field type, please refer to [The Graph Explorer](/network/explorer). +- Any variable assigned to an argument must match its type. +- In a given list of variables, each of them must be unique. +- All defined variables must be used. + +Failing to follow the above rules will end with an error from the Graph API. + +For a complete list of rules with code examples, please look at our GraphQL Validations guide. + +### Sending a query to a GraphQL API + +GraphQL is a language and set of conventions that transport over HTTP. + +It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). 
+ +However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: + +- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query +- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Fully typed result + +Here's how to query The Graph with `graph-client`: + +```tsx +import { execute } from '../.graphclient' + +const query = ` +query GetToken($id: ID!) { + token(id: $id) { + id + owner + } +} +` +const variables = { id: '1' } + +async function main() { + const result = await execute(query, variables) + // `result` is fully typed! + console.log(result) +} + +main() +``` + +More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). + +Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. + +--- + +## Writing GraphQL queries + +### Always write static queries + +A common (bad) practice is to dynamically build query strings as follows: + +```tsx +const id = params.id +const fields = ['id', 'owner'] +const query = ` +query GetToken { + token(id: ${id}) { + ${fields.join('\n')} + } +} +` + +// Execute query... 
+``` + +While the above snippet produces a valid GraphQL query, **it has many drawbacks**: + +- it makes it **harder to understand** the query as a whole +- developers are **responsible for safely sanitizing the string interpolation** +- not sending the values of the variables as part of the request parameters **prevent possible caching on server-side** +- it **prevents tools from statically analyzing the query** (ex: Linter, or type generations tools) + +For this reason, it is recommended to always write queries as static strings: + +```tsx +import { execute } from 'your-favorite-graphql-client' + +const id = params.id +const query = ` +query GetToken($id: ID!) { + token(id: $id) { + id + owner + } +} +` + +const result = await execute(query, { + variables: { + id, + }, +}) +``` + +Doing so brings **many advantages**: + +- **Easy to read and maintain** queries +- The GraphQL **server handles variables sanitization** +- **Variables can be cached** at server-level +- **Queries can be statically analyzed by tools** (more on this in the following sections) + +**Note: How to include fields conditionally in static queries** + +We might want to include the `owner` field only on a particular condition. + +For this, we can leverage the `@include(if:...)` directive as follows: + +```tsx +import { execute } from 'your-favorite-graphql-client' + +const id = params.id +const query = ` +query GetToken($id: ID!, $includeOwner: Boolean) { + token(id: $id) { + id + owner @include(if: $includeOwner) + } +} +` + +const result = await execute(query, { + variables: { + id, + includeOwner: true, + }, +}) +``` + +Note: The opposite directive is `@skip(if: ...)`. + +### Performance tips + +**"Ask for what you want"** + +GraphQL became famous for its "Ask for what you want" tagline. + +For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. 
+ +When querying GraphQL APIs, always think of querying only the fields that will be actually used. + +A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. + +For example, in the following query: + +```graphql +query listTokens { + tokens { + # will fetch up to 100 tokens + id + transactions { + # will fetch up to 100 transactions + id + } + } +} +``` + +The response could contain 100 transactions for each of the 100 tokens. + +If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. + +**Combining multiple queries** + +Your application might require querying multiple types of data as follows: + +```graphql +import { execute } from "your-favorite-graphql-client" + +const tokensQuery = ` +query GetTokens { + tokens(first: 50) { + id + owner + } +} +` +const countersQuery = ` +query GetCounters { + counters { + id + value + } +} +` + +const [tokens, counters] = Promise.all( + [ + tokensQuery, + countersQuery, + ].map(execute) +) +``` + +While this implementation is totally valid, it will require two round trips with the GraphQL API. 
+ +Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: + +```graphql +import { execute } from "your-favorite-graphql-client" + +const query = ` +query GetTokensandCounters { + tokens(first: 50) { + id + owner + } + counters { + id + value + } +} +` + +const { result: { tokens, counters } } = execute(query) +``` + +This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. + +### Leverage GraphQL Fragments + +A helpful feature to write GraphQL queries is GraphQL Fragment. + +Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): + +```graphql +query { + bondEvents { + id + newDelegate { + id + active + status + } + oldDelegate { + id + active + status + } + } +} +``` + +Such repeated fields (`id`, `active`, `status`) bring many issues: + +- harder to read for more extensive queries +- when using tools that generate TypeScript types based on queries (_more on that in the last section_), `newDelegate` and `oldDelegate` will result in two distinct inline interfaces. + +A refactored version of the query would be the following: + +```graphql +query { + bondEvents { + id + newDelegate { + ...DelegateItem + } + oldDelegate { + ...DelegateItem + } + } +} + +# we define a fragment (subtype) on Transcoder +# to factorize repeated fields in the query +fragment DelegateItem on Transcoder { + id + active + status +} +``` + +Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. + +When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). 
+
+### GraphQL Fragment do's and don'ts
+
+**Fragment base must be a type**
+
+A Fragment cannot be based on a non-applicable type, in short, **on a type not having fields**:
+
+```graphql
+fragment MyFragment on BigInt {
+  # ...
+}
+```
+
+`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base.
+
+**How to spread a Fragment**
+
+Fragments are defined on specific types and should be used accordingly in queries.
+
+Example:
+
+```graphql
+query {
+  bondEvents {
+    id
+    newDelegate {
+      ...VoteItem # Error! `VoteItem` cannot be spread on `Transcoder` type
+    }
+    oldDelegate {
+      ...VoteItem
+    }
+  }
+}
+
+fragment VoteItem on Vote {
+  id
+  voter
+}
+```
+
+`newDelegate` and `oldDelegate` are of type `Transcoder`.
+
+It is not possible to spread a fragment of type `Vote` here.
+
+**Define Fragment as an atomic business unit of data**
+
+GraphQL Fragments must be defined based on their usage.
+
+For most use-cases, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient.
+
+Here is a rule of thumb for using Fragments:
+
+- when fields of the same type are repeated in a query, group them in a Fragment
+- when similar but not the same fields are repeated, create multiple fragments, ex:
+
+```graphql
+# base fragment (mostly used in listing)
+fragment Voter on Vote {
+  id
+  voter
+}
+
+# extended fragment (when querying a detailed view of a vote)
+fragment VoteWithPoll on Vote {
+  id
+  voter
+  choiceID
+  poll {
+    id
+    proposal
+  }
+}
+```
+
+---
+
+## The essential tools
+
+### GraphQL web-based explorers
+
+Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries.
+
+If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql).
+
+### GraphQL Linting
+
+In order to keep up with the above-mentioned best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools.
+
+**GraphQL ESLint**
+
+[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort.
+
+[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as:
+
+- `@graphql-eslint/fields-on-correct-type`: is a field used on a proper type?
+- `@graphql-eslint/no-unused-variables`: should a given variable stay unused?
+- and more!
+
+This will allow you to **catch errors without even testing queries** on the playground or running them in production!
+
+### IDE plugins
+
+**VSCode and GraphQL**
+
+The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get:
+
+- syntax highlighting
+- autocomplete suggestions
+- validation against schema
+- snippets
+- go to definition for fragments and input types
+
+If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly.
+ +**WebStorm/Intellij and GraphQL** + +The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: + +- syntax highlighting +- autocomplete suggestions +- validation against schema +- snippets + +More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. diff --git a/website/pages/cs/querying/querying-from-an-application.mdx b/website/pages/cs/querying/querying-from-an-application.mdx new file mode 100644 index 000000000000..30b6c2264d64 --- /dev/null +++ b/website/pages/cs/querying/querying-from-an-application.mdx @@ -0,0 +1,267 @@ +--- +title: Querying from an Application +--- + +Once a subgraph is deployed to the Subgraph Studio or to The Graph Explorer, you will be given the endpoint for your GraphQL API that should look something like this: + +**Subgraph Studio (testing endpoint)** + +```sh +Queries (HTTP) +https://api.studio.thegraph.com/query/// +``` + +**Graph Explorer** + +```sh +Queries (HTTP) +https://gateway.thegraph.com/api//subgraphs/id/ +``` + +Using the GraphQL endpoint, you can use various GraphQL Client libraries to query the subgraph and populate your app with the data indexed by the subgraph. 
+
+Here are a couple of the more popular GraphQL clients in the ecosystem and how to use them:
+
+## GraphQL clients
+
+### Graph client
+
+The Graph is providing its own GraphQL client, `graph-client` that supports unique features such as:
+
+- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query
+- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md)
+- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md)
+- Fully typed result
+
+Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph.
+
+Let's look at how to fetch data from a subgraph with `graph-client`.
+
+To get started, make sure to install The Graph Client CLI in your project:
+
+```sh
+yarn add -D @graphprotocol/client-cli
+# or, with NPM:
+npm install --save-dev @graphprotocol/client-cli
+```
+
+Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file):
+
+```graphql
+query ExampleQuery {
+  # this one is coming from compound-v2
+  markets(first: 7) {
+    borrowRate
+    cash
+    collateralFactor
+  }
+  # this one is coming from uniswap-v2
+  pair(id: "0x00004ee988665cdda9a1080d5792cecd16dc1220") {
+    id
+    token0 {
+      id
+      symbol
+      name
+    }
+    token1 {
+      id
+      symbol
+      name
+    }
+  }
+}
+```
+
+Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example:
+
+```yaml
+# .graphclientrc.yml
+sources:
+  - name: uniswapv2
+    handler:
+      graphql:
+        endpoint: https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2
+  - name: compoundv2
+    handler:
+      graphql:
+        endpoint: https://api.thegraph.com/subgraphs/name/graphprotocol/compound-v2
+
+documents:
+  - ./src/example-query.graphql
+```
+
+Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code:
+
+```sh
+graphclient build
+```
+
+Finally, update your `.ts` file to use the generated typed GraphQL documents:
+
+```tsx
+import React, { useEffect } from 'react'
+// ...
+// we import types and typed-graphql document from the generated code (`.graphclient/`)
+import { ExampleQueryDocument, ExampleQueryQuery, execute } from '../.graphclient'
+
+function App() {
+  const [data, setData] = React.useState()
+
+  useEffect(() => {
+    execute(ExampleQueryDocument, {}).then((result) => {
+      setData(result?.data)
+    })
+  }, [setData])
+  return (
+
+ logo +

Graph Client Example

+
+ {data && ( +
+ +
+