-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathhelp.html
More file actions
110 lines (110 loc) · 97.1 KB
/
help.html
File metadata and controls
110 lines (110 loc) · 97.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
<!DOCTYPE html><html lang="en" class="dark">
<!-- Mirrored from guide.gpt-trainer.com/help by HTTrack Website Copier/3.x [XR&CO'2014], Tue, 07 Jan 2025 14:53:24 GMT -->
<!-- Added by HTTrack --><meta http-equiv="content-type" content="text/html;charset=utf-8" /><!-- /Added by HTTrack -->
<head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><link rel="apple-touch-icon" type="image/png" sizes="180x180" href="../mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/apple-touch-icon30f4.png?v=3"/><link rel="icon" type="image/png" sizes="32x32" href="../mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/favicon-32x3230f4.png?v=3"/><link rel="icon" type="image/png" sizes="16x16" href="../mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/favicon-16x1630f4.png?v=3"/><link rel="shortcut icon" type="image/x-icon" href="https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/favicon.ico?v=3"/><meta name="msapplication-config" content="https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/browserconfig.xml?v=3"/><meta name="apple-mobile-web-app-title" content="GPT-trainer API"/><meta name="application-name" content="GPT-trainer API"/><meta name="msapplication-TileColor" content="#2E3F51"/><meta name="theme-color" content="#ffffff"/><link rel="sitemap" type="application/xml" href="sitemap.xml"/><meta name="charset" content="utf-8"/><meta name="og:type" content="website"/><meta name="og:site_name" content="GPT-trainer API"/><meta name="twitter:card" content="summary_large_image"/><meta name="og:title" content="Why does my chatbot not answer correctly? - GPT-trainer API"/><meta name="twitter:title" content="Why does my chatbot not answer correctly? 
- GPT-trainer API"/><meta name="og:image" content="https://mintlify.com/docs/api/og?division=Documentation&title=Why+does+my+chatbot+not+answer+correctly%3F&logoLight=https%3A%2F%2Fmintlify.s3.us-west-1.amazonaws.com%2Fpaladinmaxinc%2Flogo%2Flight.svg&logoDark=https%3A%2F%2Fmintlify.s3.us-west-1.amazonaws.com%2Fpaladinmaxinc%2Flogo%2Fdark.svg&primaryColor=%232E3F51&lightColor=%23516F90&darkColor=%230D001D"/><meta name="twitter:image" content="https://mintlify.com/docs/api/og?division=Documentation&title=Why+does+my+chatbot+not+answer+correctly%3F&logoLight=https%3A%2F%2Fmintlify.s3.us-west-1.amazonaws.com%2Fpaladinmaxinc%2Flogo%2Flight.svg&logoDark=https%3A%2F%2Fmintlify.s3.us-west-1.amazonaws.com%2Fpaladinmaxinc%2Flogo%2Fdark.svg&primaryColor=%232E3F51&lightColor=%23516F90&darkColor=%230D001D"/><title>Why does my chatbot not answer correctly? - GPT-trainer API</title><meta name="og:url" content="/help"/><link rel="canonical" href="help.html"/><meta name="next-head-count" content="23"/><link rel="stylesheet" href="../cdn.jsdelivr.net/npm/katex%400.16.0/dist/katex.min.css" integrity="sha384-Xi8rHCmBmhbuyyhbI88391ZKP2dmfnOl4rT9ZfRI7mLTdk1wblIUnrIq35nqwEvC" crossorigin="anonymous"/><link rel="preload" href="_next/static/media/a34f9d1faa5f3315-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="_next/static/media/bb3ef058b751a6ad-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><script id="mode-toggle" data-nscript="beforeInteractive">
try {
  // Applies the persisted theme before first paint (runs beforeInteractive)
  // to avoid a flash of the wrong color scheme.
  if (localStorage.isDarkMode === 'true') {
    document.documentElement.classList.add('dark');
  } else if (localStorage.isDarkMode === 'false') {
    document.documentElement.classList.remove('dark');
  } else if (!('isDarkMode' in localStorage) && window.matchMedia('(prefers-color-scheme: dark)').matches) {
    // No stored preference: fall back to the OS-level color scheme.
    // (Original condition was `(true && … ) || false` — build-time
    // constant-folding residue; simplified without behavior change.)
    document.documentElement.classList.add('dark');
  } else {
    document.documentElement.classList.remove('dark');
  }
} catch (_) {
  // localStorage access can throw (e.g. storage blocked in some embeds);
  // in that case leave the server-rendered default class untouched.
}
</script><link rel="preload" href="_next/static/css/16035c2adeba2fd7.css" as="style"/><link rel="stylesheet" href="_next/static/css/16035c2adeba2fd7.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="_next/static/chunks/polyfills-42372ed130431b0a.js"></script><script src="_next/static/chunks/webpack-99a660de06a74703.js" defer=""></script><script src="_next/static/chunks/framework-44a6e5dc2ffde502.js" defer=""></script><script src="_next/static/chunks/main-6f86f9a153903fae.js" defer=""></script><script src="_next/static/chunks/pages/_app-1767ed3009913161.js" defer=""></script><script src="_next/static/chunks/2edb282b-7fa355f49eaeb230.js" defer=""></script><script src="_next/static/chunks/e893f787-54a006ae51267903.js" defer=""></script><script src="_next/static/chunks/086d643d-3aa1cf46914548d9.js" defer=""></script><script src="_next/static/chunks/9097-75a5bfb192203d09.js" defer=""></script><script src="_next/static/chunks/7669-2cdcbf7436d2d1bd.js" defer=""></script><script src="_next/static/chunks/5339-37d37f3a0f878abf.js" defer=""></script><script src="_next/static/chunks/4922-ba19e45713cda605.js" defer=""></script><script src="_next/static/chunks/pages/_sites/%5bsubdomain%5d/%5b%5b...slug%5d%5d-7f2259a5793aeffc.js" defer=""></script><script src="_next/static/pChs_9tFT1YAEINLWWPhQ/_buildManifest.js" defer=""></script><script src="_next/static/pChs_9tFT1YAEINLWWPhQ/_ssgManifest.js" defer=""></script><style id="__jsx-4145347147">:root{--font-inter:'__Inter_e5ab12', '__Inter_Fallback_e5ab12';--font-jetbrains-mono:'__JetBrains_Mono_3c557b', '__JetBrains_Mono_Fallback_3c557b'}</style></head><div id="__next"><main class="jsx-4145347147"><style>:root {
--primary: 46 63 81;
--primary-light: 81 111 144;
--primary-dark: 13 0 29;
--background-light: 255 255 255;
--background-dark: 17 24 39;
--gray-50: 244 244 245;
--gray-100: 239 239 240;
--gray-200: 223 224 224;
--gray-300: 207 207 208;
--gray-400: 159 160 160;
--gray-500: 113 113 114;
--gray-600: 81 81 82;
--gray-700: 63 64 65;
--gray-800: 38 39 39;
--gray-900: 24 24 25;
--gray-950: 11 12 12;
}</style><div class="relative antialiased text-gray-500 dark:text-gray-400"><span class="fixed inset-0 bg-background-light dark:bg-background-dark -z-10"></span><div id="navbar" class="z-30 fixed lg:sticky top-0 w-full"><div id="navbar-transition" class="absolute w-full h-full backdrop-blur flex-none transition-colors duration-500 border-b border-gray-500/5 dark:border-gray-300/[0.06] supports-backdrop-blur:bg-background-light/60 dark:bg-transparent"></div><div class="max-w-8xl mx-auto relative"><div class=""><div class="relative"><div class="flex items-center lg:px-12 h-16 min-w-0 px-4"><div class="h-full relative flex-1 flex items-center gap-x-4 min-w-0 border-b border-gray-500/5 dark:border-gray-300/[0.06] lg:border-none"><div class="flex-1 flex items-center gap-x-4"><a href="index.html"><span class="sr-only">GPT-trainer API<!-- --> home page</span><img class="w-auto h-7 relative object-contain block dark:hidden" src="https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/light.svg" alt="light logo"/><img class="w-auto h-7 relative object-contain hidden dark:block" src="https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/dark.svg" alt="dark logo"/></a><div class="flex items-center gap-x-2"></div></div><div class="hidden lg:block mx-px relative flex-1 bg-white dark:bg-gray-900 pointer-events-auto rounded-xl min-w-0"><button type="button" class="w-full flex items-center text-sm leading-6 rounded-xl py-1.5 pl-3.5 pr-3 shadow-sm text-gray-400 dark:text-white/50 bg-background-light dark:bg-background-dark dark:brightness-[1.1] dark:ring-1 dark:hover:brightness-[1.25] ring-1 ring-gray-400/20 hover:ring-gray-600/25 dark:ring-gray-600/30 dark:hover:ring-gray-500/30 focus:outline-primary justify-between truncate gap-2 min-w-[43px]" id="search-bar-entry"><div class="flex items-center gap-3 min-w-[42px]"><svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" 
stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-search min-w-4 flex-none text-gray-700 hover:text-gray-800 dark:text-gray-300 hover:dark:text-gray-200"><circle cx="11" cy="11" r="8"></circle><path d="m21 21-4.3-4.3"></path></svg><div class="truncate min-w-0">Search...</div></div></button></div><div class="flex-1 relative hidden lg:flex items-center ml-auto justify-end space-x-4"><nav class="text-sm"><ul class="flex space-x-6 items-center"><li><a href="mailto:hello@gpt-trainer.com" class="whitespace-nowrap font-medium text-gray-600 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" target="_blank">Support</a></li></ul></nav><div class="flex items-center"><button class="group p-2 flex items-center justify-center" aria-label="Toggle dark mode"><svg width="16" height="16" viewBox="0 0 16 16" fill="none" stroke="currentColor" xmlns="http://www.w3.org/2000/svg" class="h-4 w-4 block text-gray-400 dark:hidden group-hover:text-gray-600"><g clip-path="url(#clip0_2880_7340)"><path d="M8 1.11133V2.00022" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M12.8711 3.12891L12.2427 3.75735" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M14.8889 8H14" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M12.8711 12.8711L12.2427 12.2427" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M8 14.8889V14" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M3.12891 12.8711L3.75735 12.2427" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M1.11133 8H2.00022" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M3.12891 3.12891L3.75735 3.75735" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path><path d="M8.00043 11.7782C10.0868 11.7782 11.7782 10.0868 11.7782 8.00043C11.7782 5.91402 10.0868 4.22266 8.00043 
4.22266C5.91402 4.22266 4.22266 5.91402 4.22266 8.00043C4.22266 10.0868 5.91402 11.7782 8.00043 11.7782Z" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path></g><defs><clipPath id="clip0_2880_7340"><rect width="16" height="16" fill="white"></rect></clipPath></defs></svg><svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-moon h-4 w-4 hidden dark:block text-gray-500 dark:group-hover:text-gray-300"><path d="M12 3a6 6 0 0 0 9 9 9 9 0 1 1-9-9Z"></path></svg></button></div></div><div class="flex lg:hidden items-center gap-2"><button type="button" class="text-gray-500 w-8 h-8 flex items-center justify-center hover:text-gray-600 dark:text-gray-400 dark:hover:text-gray-300" id="search-bar-entry-mobile"><span class="sr-only">Search...</span><svg class="h-4 w-4 bg-gray-500 dark:bg-gray-400 hover:bg-gray-600 dark:hover:bg-gray-300" style="-webkit-mask-image:url(https://mintlify.b-cdn.net/v6.6.0/solid/magnifying-glass.svg);-webkit-mask-repeat:no-repeat;-webkit-mask-position:center"></svg></button><button aria-label="More actions" class="h-7 w-5 flex items-center justify-end"><svg class="h-4 w-4 bg-gray-500 dark:bg-gray-400 hover:bg-gray-600 dark:hover:bg-gray-300" style="-webkit-mask-image:url(https://mintlify.b-cdn.net/v6.6.0/solid/ellipsis-vertical.svg);-webkit-mask-repeat:no-repeat;-webkit-mask-position:center"></svg></button></div></div></div><div class="flex items-center h-14 py-4 px-5 lg:hidden"><button type="button" class="text-gray-500 hover:text-gray-600 dark:text-gray-400 dark:hover:text-gray-300"><span class="sr-only">Navigation</span><svg class="h-4" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path d="M0 96C0 78.3 14.3 64 32 64H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32C14.3 128 0 113.7 0 96zM0 256c0-17.7 14.3-32 32-32H416c17.7 0 32 14.3 32 32s-14.3 32-32 
32H32c-17.7 0-32-14.3-32-32zM448 416c0 17.7-14.3 32-32 32H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H416c17.7 0 32 14.3 32 32z"></path></svg></button><div class="ml-4 flex text-sm leading-6 whitespace-nowrap min-w-0 space-x-3"><div class="flex items-center space-x-3"><span>Guides</span><svg width="3" height="24" viewBox="0 -9 3 24" class="h-5 rotate-0 overflow-visible fill-gray-400"><path d="M0 0L3 3L0 6" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round"></path></svg></div><div class="font-semibold text-gray-900 truncate dark:text-gray-200">Why does my chatbot not answer correctly?</div></div></div></div></div></div></div><div class="max-w-8xl px-4 mx-auto lg:px-8 min-h-screen"><div class="z-20 hidden lg:block fixed bottom-0 right-auto w-[18rem] top-[4rem]" id="sidebar"><div class="absolute inset-0 z-10 stable-scrollbar-gutter overflow-auto pr-8 pb-10" id="sidebar-content"><div class="relative lg:text-sm lg:leading-6"><div class="sticky top-0 h-8 bg-gradient-to-b from-background-light dark:from-background-dark"></div><div id="navigation-items"><li class="list-none"><a class="pl-4 group flex items-center lg:text-sm lg:leading-6 mb-5 sm:mb-4 font-semibold text-primary dark:text-primary-light" href="introduction.html"><div style="background:linear-gradient(45deg, #ED727B, #F6B7BB)" class="mr-4 rounded-md p-1"><svg class="h-4 w-4 secondary-opacity group-hover:fill-primary-dark group-hover:bg-white bg-white" style="-webkit-mask-image:url(https://mintlify.b-cdn.net/v6.6.0/duotone/book-open.svg);-webkit-mask-repeat:no-repeat;-webkit-mask-position:center"></svg></div>Documentation</a></li><li class="list-none"><a class="pl-4 group flex items-center lg:text-sm lg:leading-6 mb-5 sm:mb-4 font-medium text-gray-600 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" href="api-reference/api-key-setup.html"><div class="mr-4 rounded-md p-1 zinc-box group-hover:brightness-100 group-hover:ring-0 ring-1 ring-gray-950/5 dark:ring-gray-700/40"><svg 
class="h-4 w-4 secondary-opacity group-hover:fill-primary-dark group-hover:bg-white bg-gray-400 dark:bg-gray-500" style="-webkit-mask-image:url(https://mintlify.b-cdn.net/v6.6.0/duotone/code.svg);-webkit-mask-repeat:no-repeat;-webkit-mask-position:center"></svg></div>API References</a></li><li class="list-none"><a class="pl-4 group flex items-center lg:text-sm lg:leading-6 mb-5 sm:mb-4 font-medium text-gray-600 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" href="tools/tools-intro.html"><div class="mr-4 rounded-md p-1 zinc-box group-hover:brightness-100 group-hover:ring-0 ring-1 ring-gray-950/5 dark:ring-gray-700/40"><svg class="h-4 w-4 secondary-opacity group-hover:fill-primary-dark group-hover:bg-white bg-gray-400 dark:bg-gray-500" style="-webkit-mask-image:url(https://mintlify.b-cdn.net/v6.6.0/duotone/gear.svg);-webkit-mask-repeat:no-repeat;-webkit-mask-position:center"></svg></div>Tools</a></li><li class="list-none"><a class="pl-4 group flex items-center lg:text-sm lg:leading-6 mb-5 sm:mb-4 font-medium text-gray-600 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" href="whitelabel/whitelabel-intro.html"><div class="mr-4 rounded-md p-1 zinc-box group-hover:brightness-100 group-hover:ring-0 ring-1 ring-gray-950/5 dark:ring-gray-700/40"><svg class="h-4 w-4 secondary-opacity group-hover:fill-primary-dark group-hover:bg-white bg-gray-400 dark:bg-gray-500" style="-webkit-mask-image:url(https://mintlify.b-cdn.net/v6.6.0/duotone/browser.svg);-webkit-mask-repeat:no-repeat;-webkit-mask-position:center"></svg></div>Whitelabel</a></li><div class="mt-12 lg:mt-8"><h5 class="pl-4 mb-3.5 lg:mb-2.5 font-semibold text-gray-900 dark:text-gray-200">Getting Started</h5><ul><li id="/introduction" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 
hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="introduction.html"><div class="flex-1 flex items-center space-x-2.5"><div>Introduction</div></div></a></li></ul></div><div class="mt-12 lg:mt-8"><h5 class="pl-4 mb-3.5 lg:mb-2.5 font-semibold text-gray-900 dark:text-gray-200">Guides</h5><ul><li id="/creating-first-chatbot" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="creating-first-chatbot.html"><div class="flex-1 flex items-center space-x-2.5"><div>Create Your First Chatbot</div></div></a></li><li id="/lead-collection" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="lead-collection.html"><div class="flex-1 flex items-center space-x-2.5"><div>Lead Collection</div></div></a></li><li id="/human-support-escalation" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="human-support-escalation.html"><div class="flex-1 flex items-center space-x-2.5"><div>Human Support Escalation</div></div></a></li><li id="/inbox-notifications" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary 
dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="inbox-notifications.html"><div class="flex-1 flex items-center space-x-2.5"><div>Inbox Notifications</div></div></a></li><li id="/conversation-labeling" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="conversation-labeling.html"><div class="flex-1 flex items-center space-x-2.5"><div>Conversation Labeling</div></div></a></li><li id="/multi-agents-chatbot" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="multi-agents-chatbot.html"><div class="flex-1 flex items-center space-x-2.5"><div>Multi-Agents Chatbot</div></div></a></li><li id="/fine-tuning-agent-intents" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="fine-tuning-agent-intents.html"><div class="flex-1 flex items-center space-x-2.5"><div>Fine Tuning Agent Intents</div></div></a></li><li id="/supervisor-overrides" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer 
focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="supervisor-overrides.html"><div class="flex-1 flex items-center space-x-2.5"><div>AI Supervisor Overrides</div></div></a></li><li id="/byok-pricing-guide" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="byok-pricing-guide.html"><div class="flex-1 flex items-center space-x-2.5"><div>Bring Your Own Key (BYOK) and Pricing</div></div></a></li><li id="/working-with-tables" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="working-with-tables.html"><div class="flex-1 flex items-center space-x-2.5"><div>Working with Tables and CSV</div></div></a></li><li id="/best-practices" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="best-practices.html"><div class="flex-1 flex items-center space-x-2.5"><div>Best practices for preparing training data</div></div></a></li><li id="/help" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 
cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl bg-primary/10 text-primary font-semibold dark:text-primary-light dark:bg-primary-light/10" style="padding-left:1rem" href="help.html"><div class="flex-1 flex items-center space-x-2.5"><div>Why does my chatbot not answer correctly?</div></div></a></li></ul></div><div class="mt-12 lg:mt-8"><h5 class="pl-4 mb-3.5 lg:mb-2.5 font-semibold text-gray-900 dark:text-gray-200">Function Calling</h5><ul><li id="/rag-from-external-data-provider" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="rag-from-external-data-provider.html"><div class="flex-1 flex items-center space-x-2.5"><div>RAG from an External Data Provider</div></div></a></li></ul></div><div class="mt-12 lg:mt-8"><h5 class="pl-4 mb-3.5 lg:mb-2.5 font-semibold text-gray-900 dark:text-gray-200">Authentication Webhook</h5><ul><li id="/user-identity" class="scroll-m-4 first:scroll-m-20"><a class="group mt-2 lg:mt-0 flex items-center pr-3 py-1.5 cursor-pointer focus:outline-primary dark:focus:outline-primary-light space-x-3 rounded-xl hover:bg-gray-600/5 dark:hover:bg-gray-200/5 text-gray-700 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300" style="padding-left:1rem" href="user-identity.html"><div class="flex-1 flex items-center space-x-2.5"><div>User Identity Verification</div></div></a></li></ul></div></div></div></div></div><div class="" id="content-container"><div class="flex flex-row gap-12 box-border w-full pt-40 lg:pt-10"><div class="relative grow box-border flex-col w-full mx-auto px-1 lg:pl-[23.7rem] lg:-ml-12 xl:w-[calc(100%-28rem)]" id="content-area"><header id="header" class="relative"><div class="mt-0.5 
space-y-2.5"><div class="eyebrow h-5 text-primary dark:text-primary-light text-sm font-semibold">Guides</div><div class="flex items-center"><h1 class="inline-block text-2xl sm:text-3xl font-extrabold text-gray-900 tracking-tight dark:text-gray-200">Why does my chatbot not answer correctly?</h1></div></div></header><div class="flex flex-col gap-8"><div class="flex flex-col gap-6 xl:hidden [&:not(:empty)]:mt-8"></div></div><div class="relative mt-8 prose prose-gray dark:prose-invert"><p>When Google scientists first introduced the concept of the “transformer” (<a href="https://arxiv.org/pdf/1706.03762.pdf" target="_blank" rel="noreferrer">https://arxiv.org/pdf/1706.03762.pdf</a>), the idea received attention within
the Natural Language Processing (NLP) community. It paved a new direction for researchers to develop neural network models with applications towards
natural language understanding.</p>
<p>Not long after, OpenAI popularized Generative Pre-trained Transformers (GPT) and pioneered Large Language Models (LLMs) through ChatGPT. The conversational
performance of these models made people believe that AI is rapidly approaching human-level intelligence, and can therefore be trained to perform a variety
of human-capable tasks.</p>
<p>However, despite how “human” these LLMs may seem, they are far from truly seeing, interpreting, and understanding the world the way humans do.
These models should be regarded as “probabilistic conversation simulators” rather than true analytical engines. The governing concept behind these
models is pattern matching and semantic correlations, not true logic and reasoning.</p>
<p>GPT-trainer is powered by OpenAI’s LLMs. It utilizes Retrieval Augmented Generation (RAG) technology to tune its responses to the data that you upload
as reference context.</p>
<h2 class="flex whitespace-pre-wrap group" id="what-is-retrieval-augmented-generation-rag"><div class="absolute"><a href="#what-is-retrieval-augmented-generation-rag" class="-ml-10 flex items-center opacity-0 border-0 group-hover:opacity-100" aria-label="Navigate to header"><div class="w-6 h-6 text-gray-400 rounded-md flex items-center justify-center zinc-box bg-white ring-1 ring-gray-400/30 dark:ring-gray-700/25 hover:ring-gray-400/60 dark:hover:ring-white/20"><svg xmlns="http://www.w3.org/2000/svg" fill="gray" height="12px" viewBox="0 0 576 512"><path d="M0 256C0 167.6 71.6 96 160 96h72c13.3 0 24 10.7 24 24s-10.7 24-24 24H160C98.1 144 48 194.1 48 256s50.1 112 112 112h72c13.3 0 24 10.7 24 24s-10.7 24-24 24H160C71.6 416 0 344.4 0 256zm576 0c0 88.4-71.6 160-160 160H344c-13.3 0-24-10.7-24-24s10.7-24 24-24h72c61.9 0 112-50.1 112-112s-50.1-112-112-112H344c-13.3 0-24-10.7-24-24s10.7-24 24-24h72c88.4 0 160 71.6 160 160zM184 232H392c13.3 0 24 10.7 24 24s-10.7 24-24 24H184c-13.3 0-24-10.7-24-24s10.7-24 24-24z"></path></svg></div></a></div><span class="cursor-pointer">What is Retrieval Augmented Generation (RAG)?</span></h2>
<p>Large Language Models (LLMs) are trained on enormous amounts of text data. Based on this data, the LLM will identify patterns and try to
replicate them during its own text generation. When producing an output, LLMs start from a user-written prompt, then algorithmically
assign probabilities to “tokens” or words that most likely succeed (follow after) the prompt based on patterns observed within their original
training data. This is why OpenAI named a number of its API endpoints “Chat Completions” - the model tries to “complete” the user’s input query.</p>
<div class="my-4 px-5 py-4 overflow-hidden rounded-2xl flex gap-3 border border-sky-500/20 bg-sky-50/50 dark:border-sky-500/30 dark:bg-sky-500/10"><div class="mt-0.5 w-4"><svg width="14" height="14" viewBox="0 0 14 14" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="w-4 h-4 text-sky-500" aria-label="Note"><path fill-rule="evenodd" clip-rule="evenodd" d="M7 1.3C10.14 1.3 12.7 3.86 12.7 7C12.7 10.14 10.14 12.7 7 12.7C5.48908 12.6974 4.0408 12.096 2.97241 11.0276C1.90403 9.9592 1.30264 8.51092 1.3 7C1.3 3.86 3.86 1.3 7 1.3ZM7 0C3.14 0 0 3.14 0 7C0 10.86 3.14 14 7 14C10.86 14 14 10.86 14 7C14 3.14 10.86 0 7 0ZM8 3H6V8H8V3ZM8 9H6V11H8V9Z"></path></svg></div><div class="text-sm prose min-w-0 text-sky-900 dark:text-sky-200"><p>To better understand what “tokens” are in the context of LLMs, please refer to
the following article from OpenAI’s own documentation:
<a href="https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them" target="_blank" rel="noreferrer">https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them</a></p></div></div>
<p>But patterns do not necessarily imply truth, consistency, or logical compliance, just as correlation does not imply causation.
This is the reason LLMs often “hallucinate” when producing a response. Retrieval Augmented Generation (RAG) tries to remedy
this problem by “biasing” the aforementioned probabilities during text generation via additional context injected into the prompt.</p>
<p>To further illustrate this, we walk you through the following example:</p>
<p>When you ask a question to ChatGPT, a typical query might look something like:</p>
<div class="mt-5 mb-8 not-prose rounded-2xl relative text-gray-50 bg-[#0F1117] dark:bg-codeblock dark:ring-1 dark:ring-gray-800/50 codeblock-dark"><div class="min-w-full relative text-sm leading-6 children:!my-0 children:!shadow-none children:!bg-transparent transition-[height] duration-300 ease-in-out" style="font-variant-ligatures:none;height:auto"><div class="overflow-x-auto h-full p-5 overflow-y-hidden scrollbar-thin scrollbar-thumb-rounded scrollbar-thumb-white/20 dark:scrollbar-thumb-white/20 hover:scrollbar-thumb-white/25 dark:hover:scrollbar-thumb-white/25 active:scrollbar-thumb-white/25 dark:active:scrollbar-thumb-white/25"><pre class="language-plaintext"><code><span class="">What is GPT-trainer?</span>
</code></pre></div></div></div>
<p>This is a query that relies fully on the semantic patterns within the model’s own foundational training data to generate an answer. There is no guarantee that the answer will be accurate or trustworthy.</p>
<p>But what if the prompt changes to:</p>
<div class="mt-5 mb-8 not-prose rounded-2xl relative text-gray-50 bg-[#0F1117] dark:bg-codeblock dark:ring-1 dark:ring-gray-800/50 codeblock-dark"><div class="min-w-full relative text-sm leading-6 children:!my-0 children:!shadow-none children:!bg-transparent transition-[height] duration-300 ease-in-out" style="font-variant-ligatures:none;height:auto"><div class="overflow-x-auto h-full p-5 overflow-y-hidden scrollbar-thin scrollbar-thumb-rounded scrollbar-thumb-white/20 dark:scrollbar-thumb-white/20 hover:scrollbar-thumb-white/25 dark:hover:scrollbar-thumb-white/25 active:scrollbar-thumb-white/25 dark:active:scrollbar-thumb-white/25"><pre class="language-plaintext"><code><span class="">Answer the question below using the following provided context:</span>
<!-- -->
<span class="">GPT-trainer is a powerful no-code/low-code framework that allows you to build</span>
<span class="">multi-agent chatbots with function-calling capabilities using your own data. It is designed to be user-friendly</span>
<span class="">and versatile, providing customization options and integration with popular platforms.</span>
<!-- -->
<span class="">What is GPT-trainer?</span>
</code></pre></div></div></div>
<p>Now, the model has a lot more to work with. In fact, the semantic patterns identified in the prompt itself can closely guide the model when answering
the user’s question. This “added context” biases the model to generate new tokens in a semantically similar fashion.</p>
<p>RAG is the process of “enriching” the input query with additional context so that the model answers questions based on provided information.
It does not modify or fine-tune the foundational LLM itself, but rather injects the user’s original prompt with data residing elsewhere.</p>
<p>So now you might ask, why don’t I just throw my entire 15-million-word library of blog articles directly into the LLM as reference context
every time I ask a question?</p>
<p>Well, if the LLM is large enough to take in so many tokens (words) as input all at once, then sure, it’s perfectly viable to do so, provided you can afford the costs.</p>
<p>However, we use OpenAI’s large language models (LLMs), all of which have token limits. The token limit dictates how much “effective content”
can be used as context. For GPT-trainer’s chat, we can fit about 10,000 words using the gpt-3.5-16k model.</p>
<p>The chatbot sits on top of many documents that, altogether, usually contain far more than 10,000 words. This means that we cannot fit everything
into the token limit all the time.</p>
<p>We get around this problem by splitting long documents into chunks, calculating embeddings for each chunk, and storing them piecewise into a
vector database. Embeddings can be thought of as mathematical representations of the meaning behind a snippet of text. It is like a
“universal human language” of sorts, except spoken by machines and represented in mathematical vectors. Natural language statements
that are semantically similar will be “physically” closer together in the embedding vector space. Here is a good article explaining
“text embeddings” in greater detail: <a href="https://stackoverflow.blog/2023/11/09/an-intuitive-introduction-to-text-embeddings/" target="_blank" rel="noreferrer">https://stackoverflow.blog/2023/11/09/an-intuitive-introduction-to-text-embeddings/</a></p>
<p>Every time you enter an AI query, we algorithmically search the database for relevant chunks to use as reference based on embedding distance.
This is all done independently from the LLM query itself. The LLM does not actively participate in this “chunk selection” step when deciding
what information to include in the context.</p>
<p>Afterwards, the top chunks get included as context and injected into the user’s original query. This is the basis of Retrieval Augmented
Generation (RAG). Remember our example from earlier? When the user asks:</p>
<div class="mt-5 mb-8 not-prose rounded-2xl relative text-gray-50 bg-[#0F1117] dark:bg-codeblock dark:ring-1 dark:ring-gray-800/50 codeblock-dark"><div class="min-w-full relative text-sm leading-6 children:!my-0 children:!shadow-none children:!bg-transparent transition-[height] duration-300 ease-in-out" style="font-variant-ligatures:none;height:auto"><div class="overflow-x-auto h-full p-5 overflow-y-hidden scrollbar-thin scrollbar-thumb-rounded scrollbar-thumb-white/20 dark:scrollbar-thumb-white/20 hover:scrollbar-thumb-white/25 dark:hover:scrollbar-thumb-white/25 active:scrollbar-thumb-white/25 dark:active:scrollbar-thumb-white/25"><pre class="language-plaintext"><code><span class="">What is GPT-trainer?</span>
</code></pre></div></div></div>
<p>the most relevant chunk that will likely be pulled from our vector database is:</p>
<div class="mt-5 mb-8 not-prose rounded-2xl relative text-gray-50 bg-[#0F1117] dark:bg-codeblock dark:ring-1 dark:ring-gray-800/50 codeblock-dark"><div class="min-w-full relative text-sm leading-6 children:!my-0 children:!shadow-none children:!bg-transparent transition-[height] duration-300 ease-in-out" style="font-variant-ligatures:none;height:auto"><div class="overflow-x-auto h-full p-5 overflow-y-hidden scrollbar-thin scrollbar-thumb-rounded scrollbar-thumb-white/20 dark:scrollbar-thumb-white/20 hover:scrollbar-thumb-white/25 dark:hover:scrollbar-thumb-white/25 active:scrollbar-thumb-white/25 dark:active:scrollbar-thumb-white/25"><pre class="language-plaintext"><code><span class="">GPT-trainer is a powerful no-code/low-code framework that allows you to build multi-agent chatbots with function-calling</span>
<span class="">capabilities using your own data. It is designed to be user-friendly and versatile, providing customization options and</span>
<span class="">integration with popular platforms.</span>
</code></pre></div></div></div>
<p>and the chatbot will now “know” the answer.</p>
<p>Here is a good visual from AWS illustrating at a high level the series of RAG steps:</p>
<div><div class="p-2 not-prose relative bg-gray-50/50 rounded-2xl overflow-hidden dark:bg-gray-800/25"><div style="background-position:10px 10px" class="absolute inset-0 bg-grid-neutral-200/20 [mask-image:linear-gradient(0deg,#fff,rgba(255,255,255,0.6))] dark:bg-grid-white/5 dark:[mask-image:linear-gradient(0deg,rgba(255,255,255,0.1),rgba(255,255,255,0.5))]"></div><div class="relative rounded-xl overflow-hidden flex justify-center"></div><div class="relative rounded-2xl flex justify-center mt-3 pt-0 px-8 pb-2 text-sm text-gray-700 dark:text-gray-400"><p>https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-customize-rag.html</p></div><div class="absolute inset-0 pointer-events-none border border-black/5 rounded-2xl dark:border-white/5"></div></div></div>
<p>However, this approach also has limitations. The following types of queries generally work well:</p>
<table><thead><tr><th>Query Type</th><th>Definition</th><th>Example</th></tr></thead><tbody><tr><td>Information Retrieval</td><td>Asking for specific information residing within one or more documents</td><td>“What is Paladin Max, Inc.’s PTO policy?”</td></tr><tr><td>Topic-centric Summarization</td><td>Aggregating information centered around a theme or topic</td><td>“Summarize the latest developments in generative AI”</td></tr></tbody></table>
<p>The following types of queries may not work as well:</p>
<table><thead><tr><th>Query Type</th><th>Definition</th><th>Example</th></tr></thead><tbody><tr><td>Document Comparison</td><td>Comparing documents without explicit criteria</td><td>“Find any inconsistencies across the arguments presented across my documents and list them.”</td></tr><tr><td>Counting or Math</td><td>Counting mentions or performing quantitative analysis based on document content</td><td>“How many times was John named across the contracts?”</td></tr><tr><td>Meta-Level Instructions</td><td>Directing the AI to trace document structure or reference content on specific sections or pages</td><td>“Identify key points from section 3 of business_report.pdf, covering pages 33-37.”</td></tr><tr><td>Library-wide Metadata Inquiries</td><td>Asking about properties or aggregate statistics of multiple documents inside the library</td><td>“How many documents talk about xx topic and list them in a table.”</td></tr><tr><td>Longform Text Generation</td><td>Writing extended text based on provided documents</td><td>“Write a 5000 words literature review.”</td></tr></tbody></table>
<p>If your use case demands it, our multi-Agent architecture and function-calling support may help address some of these complexities,
but they require advanced configuration on your part. Please refer to our other related articles for best practices when deploying
multi-agent chatbots with function-calling capabilities.</p>
<p>There may be alternative ways for you to optimize your chatbot’s performance by improving the quality of your training data.
To learn how, please check out <a href="best-practices.html">Best practices for preparing training data</a>.</p></div><div class="leading-6 mt-14"><div class="mb-12 px-0.5 flex items-center text-sm font-semibold text-gray-700 dark:text-gray-200"><a class="flex items-center space-x-3 group" href="best-practices.html"><svg viewBox="0 0 3 6" class="h-1.5 stroke-gray-400 overflow-visible group-hover:stroke-gray-600 dark:group-hover:stroke-gray-300"><path d="M3 0L0 3L3 6" fill="none" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path></svg><span class="group-hover:text-gray-900 dark:group-hover:text-white">Best practices for preparing training data</span></a><a class="flex items-center ml-auto space-x-3 group" href="rag-from-external-data-provider.html"><span class="group-hover:text-gray-900 dark:group-hover:text-white">RAG from an External Data Provider</span><svg viewBox="0 0 3 6" class="rotate-180 h-1.5 stroke-gray-400 overflow-visible group-hover:stroke-gray-600 dark:group-hover:stroke-gray-300"><path d="M3 0L0 3L3 6" fill="none" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path></svg></a></div><footer id="footer" class="flex gap-12 justify-between pt-10 border-t border-gray-100 sm:flex dark:border-gray-800/50 pb-28"><div class="flex items-center justify-between"><div class="sm:flex"><a href="https://mintlify.com/preview-request?utm_campaign=poweredBy&utm_medium=docs&utm_source=guide.gpt-trainer.com" target="_blank" rel="noreferrer" class="text-sm text-gray-500 dark:text-gray-400 hover:text-gray-700 dark:hover:text-gray-300">Powered by Mintlify</a></div></div></footer></div></div><div class="z-10 hidden xl:flex pl-10 box-border w-[19rem]" id="table-of-contents"><div id="table-of-contents-content" class="fixed text-gray-600 text-sm leading-6 w-[16.5rem] overflow-y-auto space-y-2 h-[calc(100%-7rem)]"><div class="text-gray-700 dark:text-gray-300 font-medium flex items-center space-x-2"><svg width="16" height="16" viewBox="0 0 16 
16" fill="none" stroke="currentColor" stroke-width="2" xmlns="http://www.w3.org/2000/svg" class="h-3 w-3"><path d="M2.44434 12.6665H13.5554" stroke-linecap="round" stroke-linejoin="round"></path><path d="M2.44434 3.3335H13.5554" stroke-linecap="round" stroke-linejoin="round"></path><path d="M2.44434 8H7.33323" stroke-linecap="round" stroke-linejoin="round"></path></svg><span>On this page</span></div><ul><li><a href="#what-is-retrieval-augmented-generation-rag" class="py-1 block hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-300">What is Retrieval Augmented Generation (RAG)?</a></li></ul></div></div></div></div></div></div></main></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"mdxSource":{"compiledSource":"\"use strict\";\nconst {Fragment: _Fragment, jsx: _jsx, jsxs: _jsxs} = arguments[0];\nconst {useMDXComponents: _provideComponents} = arguments[0];\nfunction _createMdxContent(props) {\n const _components = {\n a: \"a\",\n code: \"code\",\n p: \"p\",\n pre: \"pre\",\n span: \"span\",\n table: \"table\",\n tbody: \"tbody\",\n td: \"td\",\n th: \"th\",\n thead: \"thead\",\n tr: \"tr\",\n ..._provideComponents(),\n ...props.components\n }, {CodeBlock, Frame, Heading, Note, ZoomImage} = _components;\n if (!CodeBlock) _missingMdxReference(\"CodeBlock\", true);\n if (!Frame) _missingMdxReference(\"Frame\", true);\n if (!Heading) _missingMdxReference(\"Heading\", true);\n if (!Note) _missingMdxReference(\"Note\", true);\n if (!ZoomImage) _missingMdxReference(\"ZoomImage\", true);\n return _jsxs(_Fragment, {\n children: [_jsxs(_components.p, {\n children: [\"When Google scientists first introduced the concept of the “transformer” (\", _jsx(_components.a, {\n href: \"https://arxiv.org/pdf/1706.03762.pdf\",\n children: \"https://arxiv.org/pdf/1706.03762.pdf\"\n }), \"), the idea received attention within\\nthe Natural Language Processing (NLP) community. 
It paved a new direction for researchers to develop neural network models with applications towards\\nnatural language understanding.\"]\n }), \"\\n\", _jsx(_components.p, {\n children: \"Not long after, OpenAI popularized Generative Pre-trained Transformers (GPT) and pioneered Large Language Models (LLMs) through ChatGPT. The conversational\\nperformance of these models made people believe that AI is rapidly approaching human-level intelligence, and can therefore be trained to perform a variety\\nof human-capable tasks.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"However, despite how “human” these LLMs may seem, they are far from truly seeing, interpreting, and understanding the world the way humans do.\\nThese models should be regarded as “probabilistic conversation simulators” rather than true analytical engines. The governing concept behind these\\nmodels is pattern matching and semantic correlations, not true logic and reasoning.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"GPT-trainer is powered by OpenAI’s LLMs. It utilizes Retrieval Augmented Generation (RAG) technology to tune its responses to the data that you upload\\nas reference context.\"\n }), \"\\n\", _jsx(Heading, {\n level: \"2\",\n id: \"what-is-retrieval-augmented-generation-rag\",\n children: \"What is Retrieval Augmented Generation (RAG)?\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Large Language Models (LLMs) are trained on enormous amounts of text data. Based on this data, the LLM will identify patterns and try to\\nreplicate them during its own text generation. When producing an output, LLMs start from a user-written prompt, then algorithmically\\nassigns probabilities to “tokens” or words that most likely succeed (follow after) the prompt based on patterns observed within its original\\ntraining data. 
This is why OpenAI named a number of its API endpoints “Chat Completions” - the model tries to “complete” the user’s input query.\"\n }), \"\\n\", _jsx(Note, {\n children: _jsxs(_components.p, {\n children: [\"To better understand what “tokens” are in the context of LLMs, please refer to\\nthe following article from OpenAI’s own documentation:\\n\", _jsx(_components.a, {\n href: \"https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them\",\n children: \"https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them\"\n })]\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"But patterns do not necessarily imply truth, consistency, or logical compliance, just as correlation does not imply causation.\\nThis is the reason LLMs often “hallucinate” when producing a response. Retrieval Augmented Generation (RAG) tries to remedy\\nthis problem by “biasing” the aforementioned probabilities during text generation via additional context injected into the prompt.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"To further illustrate this, we walk you through the following example:\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"When you ask a question to ChatGPT, a typical query might look something like:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"What is GPT-trainer?\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"This is a query that relies fully on the semantic patterns within the model’s own foundational training data to generate an answer. 
There is no guarantee that the answer will be accurate or trustworthy.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"But what if the prompt changes to:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"Answer the question below using the following provided context:\"\n }), \"\\n\", \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"GPT-trainer is a powerful no-code/low-code framework that allows you to build\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"multi-agent chatbots with function-calling capabilities using your own data. It is designed to be user-friendly\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"and versatile, providing customization options and integration with popular platforms.\"\n }), \"\\n\", \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"What is GPT-trainer?\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"Now, the model has a lot more to work with. In fact, the semantic patterns identified in the prompt itself can closely guide the model when answering\\nthe user’s question. 
This “added context” biases the model to generate new tokens in a semantically similar fashion.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"RAG is the process of “enriching” the input query with additional context so that the model answers questions based on provided information.\\nIt does not modify or fine-tune the foundational LLM itself, but rather injects the user’s original prompt with data residing elsewhere.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"So now you might ask, why don’t I just throw my entire 15-million-words library of blog articles directly into the LLM as reference context\\nevery time I ask a question?\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Well, if the LLM is large enough to take in so many tokens (words) as input all at once, then sure, it’s perfectly viable to do so, provided you can afford the costs.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"However, we use OpenAI’s large language models (LLMs), all of which have token limits. The token limit dictates how much “effective content”\\ncan be used as context. For GPT-trainer’s chat, we can fit about 10,000 words using the gpt-3.5-16k model.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"The chatbot sits on top of many documents that, altogether, usually contain far more than 10,000 words. This means that we cannot fit everything\\ninto the token limit all the time.\"\n }), \"\\n\", _jsxs(_components.p, {\n children: [\"We get around this problem by splitting long documents into chunks, calculating embeddings for each chunk, and storing them piecewise into a\\nvector database. Embeddings can be thought of as mathematical representations of the meaning behind a snippet of text. It is like a\\n“universal human language” of sorts, except spoken by machines and represented in mathematical vectors. Natural language statements\\nthat are semantically similar will be “physically” closer together in the embedding vector space. 
Here is a good article explaining\\n“text embeddings” in greater detail: \", _jsx(_components.a, {\n href: \"https://stackoverflow.blog/2023/11/09/an-intuitive-introduction-to-text-embeddings/\",\n children: \"https://stackoverflow.blog/2023/11/09/an-intuitive-introduction-to-text-embeddings/\"\n })]\n }), \"\\n\", _jsx(_components.p, {\n children: \"Every time you enter an AI query, we algorithmically search the database for relevant chunks to use as reference based on embedding distance.\\nThis is all done independently from the LLM query itself. The LLM does not actively participate in this “chunk selection” step when deciding\\nwhat information to include in the context.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Afterwards, the top chunks get included as context and injected into the user’s original query. This is the basis of Retrieval Augmented\\nGeneration (RAG). Remember our example from earlier? When the user asks:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"What is GPT-trainer?\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"the most relevant chunk that will likely be pulled from our vector database is:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"GPT-trainer is a powerful no-code/low-code framework that allows you to build multi-agent chatbots with function-calling\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"capabilities using your own data. 
It is designed to be user-friendly and versatile, providing customization options and\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"integration with popular platforms.\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"and the chatbot will now “know” the answer.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Here is a good visual from AWS illustrating at a high level the series of RAG steps:\"\n }), \"\\n\", _jsx(Frame, {\n caption: \"https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-customize-rag.html\",\n children: _jsx(ZoomImage, {\n children: _jsx(\"img\", {\n src: \"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/images/help-1.png\"\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"However, this approach also has limitations. The following types of queries generally work well:\"\n }), \"\\n\", _jsxs(_components.table, {\n children: [_jsx(_components.thead, {\n children: _jsxs(_components.tr, {\n children: [_jsx(_components.th, {\n children: \"Query Type\"\n }), _jsx(_components.th, {\n children: \"Definition\"\n }), _jsx(_components.th, {\n children: \"Example\"\n })]\n })\n }), _jsxs(_components.tbody, {\n children: [_jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Information Retrieval\"\n }), _jsx(_components.td, {\n children: \"Asking for specific information residing within one or more documents\"\n }), _jsx(_components.td, {\n children: \"”What is Paladin Max, Inc.’s PTO policy?”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Topic-centric Summarization\"\n }), _jsx(_components.td, {\n children: \"Aggregating information centered around a theme or topic\"\n }), _jsx(_components.td, {\n children: \"”Summarize the latest developments in generative AI”\"\n })]\n })]\n })]\n }), \"\\n\", _jsx(_components.p, {\n children: \"The following types of queries may not work as well:\"\n }), 
\"\\n\", _jsxs(_components.table, {\n children: [_jsx(_components.thead, {\n children: _jsxs(_components.tr, {\n children: [_jsx(_components.th, {\n children: \"Query Type\"\n }), _jsx(_components.th, {\n children: \"Definition\"\n }), _jsx(_components.th, {\n children: \"Example\"\n })]\n })\n }), _jsxs(_components.tbody, {\n children: [_jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Document Comparison\"\n }), _jsx(_components.td, {\n children: \"Comparing documents without an explicit criteria\"\n }), _jsx(_components.td, {\n children: \"”Find any inconsistencies across the arguments presented across my documents and list them.”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Counting or Math\"\n }), _jsx(_components.td, {\n children: \"Counting mentions or performing quantitative analysis based on document content\"\n }), _jsx(_components.td, {\n children: \"”How many times was John named across the contracts?”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Meta-Level Instructions\"\n }), _jsx(_components.td, {\n children: \"Directing the AI to trace document structure or reference content on specific sections or pages\"\n }), _jsx(_components.td, {\n children: \"”Identify key points from section 3 of business_report.pdf, covering pages 33-37.”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Library-wide Metadata Inquiries\"\n }), _jsx(_components.td, {\n children: \"Asking about properties or aggregate statistics of multiple documents inside the library\"\n }), _jsx(_components.td, {\n children: \"”How many documents talk about xx topic and list them in a table.”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Longform Text Generation\"\n }), _jsx(_components.td, {\n children: \"Writing extended text based on provided documents\"\n }), _jsx(_components.td, {\n children: 
\"”Write a 5000 words literature review.”\"\n })]\n })]\n })]\n }), \"\\n\", _jsx(_components.p, {\n children: \"If your use case demands it, our multi-Agent architecture and function-calling support may help address some of these complexities,\\nbut they require advanced configuration on your part. Please refer to our other related articles for best practices when deploying\\nmulti-agent chatbots with function-calling capabilities.\"\n }), \"\\n\", _jsxs(_components.p, {\n children: [\"There may be alternative ways for you to optimize your chatbot’s performance by improving the quality of your training data.\\nTo learn how, please check out \", _jsx(_components.a, {\n href: \"/best-practices\",\n children: \"Best practices for preparing training data\"\n }), \".\"]\n })]\n });\n}\nfunction MDXContent(props = {}) {\n const {wrapper: MDXLayout} = {\n ..._provideComponents(),\n ...props.components\n };\n return MDXLayout ? _jsx(MDXLayout, {\n ...props,\n children: _jsx(_createMdxContent, {\n ...props\n })\n }) : _createMdxContent(props);\n}\nreturn {\n default: MDXContent\n};\nfunction _missingMdxReference(id, component) {\n throw new Error(\"Expected \" + (component ? 
\"component\" : \"object\") + \" `\" + id + \"` to be defined: you likely forgot to import, pass, or provide it.\");\n}\n","frontmatter":{},"scope":{"mintConfig":{"$schema":"https://mintlify.com/schema.json","name":"GPT-trainer API","logo":{"light":"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/light.svg","dark":"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/dark.svg"},"favicon":"/logo/favicon.png","api":{"baseUrl":"https://app.gpt-trainer.com/api","auth":{"method":"bearer"}},"colors":{"primary":"#2E3F51","light":"#516F90","dark":"#0D001D","background":{"dark":"#111827"},"anchors":{"from":"#ED727B","to":"#F6B7BB"}},"topbarLinks":[{"url":"mailto:hello@gpt-trainer.com","name":"Support","_id":"676a0adaff1411a490c729a2"}],"navigation":[{"group":"Getting Started","pages":["introduction"]},{"group":"Guides","pages":["creating-first-chatbot","lead-collection","human-support-escalation","inbox-notifications","conversation-labeling","multi-agents-chatbot","fine-tuning-agent-intents","supervisor-overrides","byok-pricing-guide","working-with-tables","best-practices","help"]},{"group":"Function Calling","pages":["rag-from-external-data-provider"]},{"group":"API Usage Guides","pages":["api-reference/api-key-setup","api-reference/guide-00-chatbot-create","api-reference/guide-01-chat","api-reference/guide-02-source"]},{"group":"Authentication Webhook","pages":["user-identity"]},{"group":"Chatbots","pages":["api-reference/chatbots/properties-reference","api-reference/chatbots/create","api-reference/chatbots/update","api-reference/chatbots/fetch","api-reference/chatbots/fetch_multi","api-reference/chatbots/delete"]},{"group":"Agents","pages":["api-reference/agents/properties-reference","api-reference/agents/create","api-reference/agents/update","api-reference/agents/fetch_multi","api-reference/agents/delete"]},{"group":"Chatbot 
Sessions","pages":["api-reference/sessions/properties-reference","api-reference/sessions/create","api-reference/sessions/fetch","api-reference/sessions/fetch_multi","api-reference/sessions/delete","api-reference/sessions/delete_multi"]},{"group":"Session Messages","pages":["api-reference/messages/properties-reference","api-reference/messages/create","api-reference/messages/fetch_multi","api-reference/messages/delete","api-reference/messages/delete_multi"]},{"group":"Data Sources","pages":["api-reference/data-sources/properties-reference","api-reference/data-sources/create-file","api-reference/data-sources/create-qa","api-reference/data-sources/create-url","api-reference/data-sources/update","api-reference/data-sources/fetch_multi","api-reference/data-sources/retrain","api-reference/data-sources/delete","api-reference/data-sources/delete_multi"]},{"group":"Data Source Tags","pages":["api-reference/source-tags/create","api-reference/source-tags/fetch-multi","api-reference/source-tags/update","api-reference/source-tags/delete"]},{"group":"Tool Guides","pages":["tools/tools-intro"]},{"group":"Tools","pages":["tools/weekday"]},{"group":"Whitelabel Dashboard","pages":["whitelabel/whitelabel-intro","whitelabel/whitelabel-plans","whitelabel/whitelabel-users"]},{"group":"Integrations","pages":["whitelabel/whitelabel-zapier","whitelabel/whitelabel-make","whitelabel/whitelabel-meta"]}],"primaryTab":{"name":"Documentation"},"anchors":[{"name":"API References","url":"api-reference","icon":"code","_id":"676a0adaff1411a490c7299f"},{"name":"Tools","url":"tools","icon":"gear","_id":"676a0adaff1411a490c729a0"},{"name":"Whitelabel","url":"whitelabel","icon":"browser","_id":"676a0adaff1411a490c729a1"}],"repo":{"github":{"owner":"ks-collab","repo":"gpt-trainer-docs","contentDirectory":"","deployBranch":"main","isPrivate":false}}},"pageMetadata":{"title":"Why does my chatbot not answer correctly?","description":null,"href":"/help"}}},"mdxExtracts":{"tableOfContents":[{"title":"What is 
Retrieval Augmented Generation (RAG)?","slug":"what-is-retrieval-augmented-generation-rag","depth":2,"children":[]}],"codeExamples":{}},"description":null,"pageData":{"navWithMetadata":[{"group":"Getting Started","pages":[{"title":"Introduction","description":null,"href":"/introduction"}]},{"group":"Guides","pages":[{"title":"Create Your First Chatbot","description":null,"href":"/creating-first-chatbot"},{"title":"Lead Collection","description":null,"href":"/lead-collection"},{"title":"Human Support Escalation","description":null,"href":"/human-support-escalation"},{"title":"Inbox Notifications","description":null,"href":"/inbox-notifications"},{"title":"Conversation Labeling","description":null,"href":"/conversation-labeling"},{"title":"Multi-Agents Chatbot","description":null,"href":"/multi-agents-chatbot"},{"title":"Fine Tuning Agent Intents","description":null,"href":"/fine-tuning-agent-intents"},{"title":"AI Supervisor Overrides","description":null,"href":"/supervisor-overrides"},{"title":"Bring Your Own Key (BYOK) and Pricing","description":null,"href":"/byok-pricing-guide"},{"title":"Working with Tables and CSV","description":null,"href":"/working-with-tables"},{"title":"Best practices for preparing training data","description":null,"href":"/best-practices"},{"title":"Why does my chatbot not answer correctly?","description":null,"href":"/help"}]},{"group":"Function Calling","pages":[{"title":"RAG from an External Data Provider","description":null,"href":"/rag-from-external-data-provider"}]},{"group":"API Usage Guides","pages":[{"title":"Getting a GPT-trainer API Key","description":null,"href":"/api-reference/api-key-setup"},{"title":"Create Chabot","description":null,"href":"/api-reference/guide-00-chatbot-create"},{"title":"Chat with Chatbot","description":null,"href":"/api-reference/guide-01-chat"},{"title":"Uploading Data Sources","description":null,"href":"/api-reference/guide-02-source"}]},{"group":"Authentication Webhook","pages":[{"title":"User 
Identity Verification","description":null,"href":"/user-identity"}]},{"group":"Chatbots","pages":[{"title":"Chatbot Properties","description":"Detailed explanation of chatbot's properties","href":"/api-reference/chatbots/properties-reference"},{"title":"Create Chatbot","description":"Create a chatbot that belongs to the authenticated user","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/create","href":"/api-reference/chatbots/create"},{"title":"Update Chatbot","description":"Update chatbot meta base on uuid","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/update","href":"/api-reference/chatbots/update"},{"title":"Fetch a Chatbot","description":"Fetch single chatbot base on uuid","api":"GET https://app.gpt-trainer.com/api/v1/chatbot/{uuid}","href":"/api-reference/chatbots/fetch"},{"title":"Fetch all Chatbots","description":"Fetch the list of chatbots for current user","api":"GET https://app.gpt-trainer.com/api/v1/chatbots","href":"/api-reference/chatbots/fetch_multi"},{"title":"Delete Chatbot","description":"Delete single chatbot base on uuid","api":"DELETE https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/delete","href":"/api-reference/chatbots/delete"}]},{"group":"Agents","pages":[{"title":"Agent Properties","description":"Detailed explanation of agent's properties","href":"/api-reference/agents/properties-reference"},{"title":"Create Agent","description":"Create an agent for a chatbot specified by chatbot uuid","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/\u003cuuid\u003e/agent/create","href":"/api-reference/agents/create"},{"title":"Update Agent","description":"Update agent meta based on uuid","api":"POST https://app.gpt-trainer.com/api/v1/agent/{uuid}/update","href":"/api-reference/agents/update"},{"title":"Fetch all Agents","description":"Fetch the list of agents for a chatbot specified by chatbot uuid","api":"GET 
https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/agents","href":"/api-reference/agents/fetch_multi"},{"title":"Delete Agent","description":"Delete single agent base on uuid","api":"DELETE https://app.gpt-trainer.com/api/v1/agent/{uuid}/delete","href":"/api-reference/agents/delete"}]},{"group":"Chatbot Sessions","pages":[{"title":"Session Properties","description":"Detailed explanation of sessions's properties","href":"/api-reference/sessions/properties-reference"},{"title":"Create Session","description":"Create a chat session for a chatbot specified by chatbot uuid","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/session/create","href":"/api-reference/sessions/create"},{"title":"Fetch a Session","description":"Fetch single chatbot session base on uuid","api":"GET https://app.gpt-trainer.com/api/v1/session/{uuid}","href":"/api-reference/sessions/fetch"},{"title":"Fetch all Sessions","description":"Fetch the list of sessions for a chatbot specified by chatbot uuid","api":"GET https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/sessions","href":"/api-reference/sessions/fetch_multi"},{"title":"Delete Session","description":"Delete a session by its UUID","api":"POST https://app.gpt-trainer.com/api/v1/session/{uuid}/delete","href":"/api-reference/sessions/delete"},{"title":"Delete Session","description":"Delete a session by its UUID","api":"POST https://app.gpt-trainer.com/api/v1/session/{uuid}/delete","href":"/api-reference/sessions/delete_multi"}]},{"group":"Session Messages","pages":[{"title":"Message Properties","description":"Detailed explanation of message's properties","href":"/api-reference/messages/properties-reference"},{"title":"Create Message","description":"Create a session message for a chatbot session specified by session uuid","api":"POST https://app.gpt-trainer.com/api/v1/session/{uuid}/message/stream","href":"/api-reference/messages/create"},{"title":"Fetch all Messages","description":"Fetch the list of messages for a session specified by 
session uuid","api":"GET https://app.gpt-trainer.com/api/v1/session/{uuid}/messages","href":"/api-reference/messages/fetch_multi"},{"title":"Delete Message","description":"Delete single message base on uuid","api":"POST https://app.gpt-trainer.com/api/v1/message/{uuid}/delete","href":"/api-reference/messages/delete"},{"title":"Delete multiple Messages","description":"Delete list of messages base on their uuids","api":"POST https://app.gpt-trainer.com/api/v1/messages/delete","href":"/api-reference/messages/delete_multi"}]},{"group":"Data Sources","pages":[{"title":"Source properties","description":"Detailed explanation of source's properties","href":"/api-reference/data-sources/properties-reference"},{"title":"Upload a File","description":"Create a File source for a chatbot specified by chatbot uuid","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/data-source/upload","href":"/api-reference/data-sources/create-file"},{"title":"Create QA Source","description":"Create a QA source for a chatbot specified by chatbot uuid","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/data-source/qa","href":"/api-reference/data-sources/create-qa"},{"title":"Create URL Source","description":"Create a URL source for a chatbot specified by chatbot uuid","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/data-source/url","href":"/api-reference/data-sources/create-url"},{"title":"Update Source","description":"Update source meta base on uuid","api":"POST https://app.gpt-trainer.com/api/v1/data-source/{uuid}/update","href":"/api-reference/data-sources/update"},{"title":"Fetch list of Sources","description":"Fetch the list of sources for a chatbot specified by chatbot uuid","api":"GET https://app.gpt-trainer.com/api/v1/chatbot/{uuid}/data-sources","href":"/api-reference/data-sources/fetch_multi"},{"title":"Retrain Sources","description":"Retrain multiple URL data sources to fetch the latest content from them.","api":"POST 
https://app.gpt-trainer.com/api/v1/data-sources/url/re-scrape","href":"/api-reference/data-sources/retrain"},{"title":"Delete Source","description":"Delete single source base on uuid","api":"POST https://app.gpt-trainer.com/api/v1/data-source/{uuid}/delete","href":"/api-reference/data-sources/delete"},{"title":"Delete multiple Sources","description":"Delete list of sources base on their uuids","api":"POST https://app.gpt-trainer.com/api/v1/data-sources/delete","href":"/api-reference/data-sources/delete_multi"}]},{"group":"Data Source Tags","pages":[{"title":"Create Source Tag","description":"Create a source tag for a chabot. Source tags can be used to organize sources.","api":"POST https://app.gpt-trainer.com/api/v1/chatbot/\u003cuuid\u003e/source-tag/create","href":"/api-reference/source-tags/create"},{"title":"Fetch all Source Tags","description":"List all source tags for a chabot.","api":"GET https://app.gpt-trainer.com/api/v1/chatbot/\u003cuuid\u003e/source-tags","href":"/api-reference/source-tags/fetch-multi"},{"title":"Update Source Tag","description":"Update the properties of a source tag, including its list of documents.","api":"POST https://app.gpt-trainer.com/api/v1/source-tag/\u003cuuid\u003e/update","href":"/api-reference/source-tags/update"},{"title":"Delete Source Tag","description":"Delete a source tag based on uuid","api":"DELETE https://app.gpt-trainer.com/api/v1/source-tag/{uuid}/delete","href":"/api-reference/source-tags/delete"}]},{"group":"Tool Guides","pages":[{"title":"Introduction","description":null,"href":"/tools/tools-intro"}]},{"group":"Tools","pages":[{"title":"Weekday","description":"This function finds the day of the week, given a date. 
For example, given the date '2024-10-07', it will return a JSON: `{'weekday': 'Monday'}`","api":"GET https://tools.gpt-trainer.com/weekday","href":"/tools/weekday"}]},{"group":"Whitelabel Dashboard","pages":[{"title":"Introduction and first-time setup","description":null,"href":"/whitelabel/whitelabel-intro"},{"title":"Creating plans and pricing considerations","description":null,"href":"/whitelabel/whitelabel-plans"},{"title":"Managing your users","description":null,"href":"/whitelabel/whitelabel-users"}]},{"group":"Integrations","pages":[{"title":"Publishing a Zapier app","description":null,"href":"/whitelabel/whitelabel-zapier"},{"title":"Publishing a Make app","description":null,"href":"/whitelabel/whitelabel-make"},{"title":"Adding Meta integrations","description":null,"href":"/whitelabel/whitelabel-meta"}]}],"pageMetadata":{"title":"Why does my chatbot not answer correctly?","description":null,"href":"/help"},"mintConfig":{"layout":"topnav","sidebar":{"items":"container"},"topbar":{"style":"default"},"search":{"location":"top"},"rounded":"default","codeBlock":{"mode":"dark"},"$schema":"https://mintlify.com/schema.json","name":"GPT-trainer API","logo":{"light":"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/light.svg","dark":"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/dark.svg"},"favicon":"/logo/favicon.png","api":{"baseUrl":"https://app.gpt-trainer.com/api","auth":{"method":"bearer"}},"colors":{"primary":"#2E3F51","light":"#516F90","dark":"#0D001D","background":{"dark":"#111827"},"anchors":{"from":"#ED727B","to":"#F6B7BB"}},"topbarLinks":[{"url":"mailto:hello@gpt-trainer.com","name":"Support","_id":"676a0adaff1411a490c729a2"}],"navigation":[{"group":"Getting 
Started","pages":["introduction"]},{"group":"Guides","pages":["creating-first-chatbot","lead-collection","human-support-escalation","inbox-notifications","conversation-labeling","multi-agents-chatbot","fine-tuning-agent-intents","supervisor-overrides","byok-pricing-guide","working-with-tables","best-practices","help"]},{"group":"Function Calling","pages":["rag-from-external-data-provider"]},{"group":"API Usage Guides","pages":["api-reference/api-key-setup","api-reference/guide-00-chatbot-create","api-reference/guide-01-chat","api-reference/guide-02-source"]},{"group":"Authentication Webhook","pages":["user-identity"]},{"group":"Chatbots","pages":["api-reference/chatbots/properties-reference","api-reference/chatbots/create","api-reference/chatbots/update","api-reference/chatbots/fetch","api-reference/chatbots/fetch_multi","api-reference/chatbots/delete"]},{"group":"Agents","pages":["api-reference/agents/properties-reference","api-reference/agents/create","api-reference/agents/update","api-reference/agents/fetch_multi","api-reference/agents/delete"]},{"group":"Chatbot Sessions","pages":["api-reference/sessions/properties-reference","api-reference/sessions/create","api-reference/sessions/fetch","api-reference/sessions/fetch_multi","api-reference/sessions/delete","api-reference/sessions/delete_multi"]},{"group":"Session Messages","pages":["api-reference/messages/properties-reference","api-reference/messages/create","api-reference/messages/fetch_multi","api-reference/messages/delete","api-reference/messages/delete_multi"]},{"group":"Data Sources","pages":["api-reference/data-sources/properties-reference","api-reference/data-sources/create-file","api-reference/data-sources/create-qa","api-reference/data-sources/create-url","api-reference/data-sources/update","api-reference/data-sources/fetch_multi","api-reference/data-sources/retrain","api-reference/data-sources/delete","api-reference/data-sources/delete_multi"]},{"group":"Data Source 
Tags","pages":["api-reference/source-tags/create","api-reference/source-tags/fetch-multi","api-reference/source-tags/update","api-reference/source-tags/delete"]},{"group":"Tool Guides","pages":["tools/tools-intro"]},{"group":"Tools","pages":["tools/weekday"]},{"group":"Whitelabel Dashboard","pages":["whitelabel/whitelabel-intro","whitelabel/whitelabel-plans","whitelabel/whitelabel-users"]},{"group":"Integrations","pages":["whitelabel/whitelabel-zapier","whitelabel/whitelabel-make","whitelabel/whitelabel-meta"]}],"primaryTab":{"name":"Documentation"},"anchors":[{"name":"API References","url":"api-reference","icon":"code","_id":"676a0adaff1411a490c7299f"},{"name":"Tools","url":"tools","icon":"gear","_id":"676a0adaff1411a490c729a0"},{"name":"Whitelabel","url":"whitelabel","icon":"browser","_id":"676a0adaff1411a490c729a1"}],"repo":{"github":{"owner":"ks-collab","repo":"gpt-trainer-docs","contentDirectory":"","deployBranch":"main","isPrivate":false}}},"apiReferenceData":{}},"favicons":{"icons":[{"rel":"apple-touch-icon","sizes":"180x180","href":"https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/apple-touch-icon.png?v=3","type":"image/png"},{"rel":"icon","sizes":"32x32","href":"https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/favicon-32x32.png?v=3","type":"image/png"},{"rel":"icon","sizes":"16x16","href":"https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/favicon-16x16.png?v=3","type":"image/png"},{"rel":"shortcut 
icon","href":"https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/favicon.ico?v=3","type":"image/x-icon"}],"browserconfig":"https://mintlify.s3-us-west-1.amazonaws.com/paladinmaxinc/_generated/favicon/browserconfig.xml?v=3"},"subdomain":"guide.gpt-trainer.com","internalAnalyticsWriteKey":"phc_TXdpocbGVeZVm5VJmAsHTMrCofBQu3e0kN8HGMNGTVW","inkeep":{"integrationApiKey":"4f40617e2acf6b9193ebf897d3ed2d80d831b9c5b431b91d"},"trieve":{"datasetId":"5f00ed3a-c71f-499e-8e71-5e23e2290da3"},"shouldIndex":true,"org":{"createdAt":"2024-01-20T00:51:05.035Z"},"cssFiles":[],"jsFiles":[],"mdxSourceWithNoJs":{"compiledSource":"\"use strict\";\nconst {Fragment: _Fragment, jsx: _jsx, jsxs: _jsxs} = arguments[0];\nconst {useMDXComponents: _provideComponents} = arguments[0];\nfunction _createMdxContent(props) {\n const _components = {\n a: \"a\",\n code: \"code\",\n p: \"p\",\n pre: \"pre\",\n span: \"span\",\n table: \"table\",\n tbody: \"tbody\",\n td: \"td\",\n th: \"th\",\n thead: \"thead\",\n tr: \"tr\",\n ..._provideComponents(),\n ...props.components\n }, {CodeBlock, Frame, Heading, Note, ZoomImage} = _components;\n if (!CodeBlock) _missingMdxReference(\"CodeBlock\", true);\n if (!Frame) _missingMdxReference(\"Frame\", true);\n if (!Heading) _missingMdxReference(\"Heading\", true);\n if (!Note) _missingMdxReference(\"Note\", true);\n if (!ZoomImage) _missingMdxReference(\"ZoomImage\", true);\n return _jsxs(_Fragment, {\n children: [_jsxs(_components.p, {\n children: [\"When Google scientists first introduced the concept of the “transformer” (\", _jsx(_components.a, {\n href: \"https://arxiv.org/pdf/1706.03762.pdf\",\n children: \"https://arxiv.org/pdf/1706.03762.pdf\"\n }), \"), the idea received attention within\\nthe Natural Language Processing (NLP) community. 
It paved a new direction for researchers to develop neural network models with applications towards\\nnatural language understanding.\"]\n }), \"\\n\", _jsx(_components.p, {\n children: \"Not long after, OpenAI popularized Generative Pre-trained Transformers (GPT) and pioneered Large Language Models (LLMs) through ChatGPT. The conversational\\nperformance of these models made people believe that AI is rapidly approaching human-level intelligence, and can therefore be trained to perform a variety\\nof human-capable tasks.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"However, despite how “human” these LLMs may seem, they are far from truly seeing, interpreting, and understanding the world the way humans do.\\nThese models should be regarded as “probabilistic conversation simulators” rather than true analytical engines. The governing concept behind these\\nmodels is pattern matching and semantic correlations, not true logic and reasoning.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"GPT-trainer is powered by OpenAI’s LLMs. It utilizes Retrieval Augmented Generation (RAG) technology to tune its responses to the data that you upload\\nas reference context.\"\n }), \"\\n\", _jsx(Heading, {\n level: \"2\",\n id: \"what-is-retrieval-augmented-generation-rag\",\n children: \"What is Retrieval Augmented Generation (RAG)?\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Large Language Models (LLMs) are trained on enormous amounts of text data. Based on this data, the LLM will identify patterns and try to\\nreplicate them during its own text generation. When producing an output, LLMs start from a user-written prompt, then algorithmically\\nassigns probabilities to “tokens” or words that most likely succeed (follow after) the prompt based on patterns observed within its original\\ntraining data. 
This is why OpenAI named a number of its API endpoints “Chat Completions” - the model tries to “complete” the user’s input query.\"\n }), \"\\n\", _jsx(Note, {\n children: _jsxs(_components.p, {\n children: [\"To better understand what “tokens” are in the context of LLMs, please refer to\\nthe following article from OpenAI’s own documentation:\\n\", _jsx(_components.a, {\n href: \"https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them\",\n children: \"https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them\"\n })]\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"But patterns do not necessarily imply truth, consistency, or logical compliance, just as correlation does not imply causation.\\nThis is the reason LLMs often “hallucinate” when producing a response. Retrieval Augmented Generation (RAG) tries to remedy\\nthis problem by “biasing” the aforementioned probabilities during text generation via additional context injected into the prompt.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"To further illustrate this, we walk you through the following example:\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"When you ask a question to ChatGPT, a typical query might look something like:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"What is GPT-trainer?\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"This is a query that relies fully on the semantic patterns within the model’s own foundational training data to generate an answer. 
There is no guarantee that the answer will be accurate or trustworthy.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"But what if the prompt changes to:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"Answer the question below using the following provided context:\"\n }), \"\\n\", \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"GPT-trainer is a powerful no-code/low-code framework that allows you to build\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"multi-agent chatbots with function-calling capabilities using your own data. It is designed to be user-friendly\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"and versatile, providing customization options and integration with popular platforms.\"\n }), \"\\n\", \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"What is GPT-trainer?\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"Now, the model has a lot more to work with. In fact, the semantic patterns identified in the prompt itself can closely guide the model when answering\\nthe user’s question. 
This “added context” biases the model to generate new tokens in a semantically similar fashion.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"RAG is the process of “enriching” the input query with additional context so that the model answers questions based on provided information.\\nIt does not modify or fine-tune the foundational LLM itself, but rather injects the user’s original prompt with data residing elsewhere.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"So now you might ask, why don’t I just throw my entire 15-million-words library of blog articles directly into the LLM as reference context\\nevery time I ask a question?\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Well, if the LLM is large enough to take in so many tokens (words) as input all at once, then sure, it’s perfectly viable to do so, provided you can afford the costs.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"However, we use OpenAI’s large language models (LLMs), all of which have token limits. The token limit dictates how much “effective content”\\ncan be used as context. For GPT-trainer’s chat, we can fit about 10,000 words using the gpt-3.5-16k model.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"The chatbot sits on top of many documents that, altogether, usually contain far more than 10,000 words. This means that we cannot fit everything\\ninto the token limit all the time.\"\n }), \"\\n\", _jsxs(_components.p, {\n children: [\"We get around this problem by splitting long documents into chunks, calculating embeddings for each chunk, and storing them piecewise into a\\nvector database. Embeddings can be thought of as mathematical representations of the meaning behind a snippet of text. It is like a\\n“universal human language” of sorts, except spoken by machines and represented in mathematical vectors. Natural language statements\\nthat are semantically similar will be “physically” closer together in the embedding vector space. 
Here is a good article explaining\\n“text embeddings” in greater detail: \", _jsx(_components.a, {\n href: \"https://stackoverflow.blog/2023/11/09/an-intuitive-introduction-to-text-embeddings/\",\n children: \"https://stackoverflow.blog/2023/11/09/an-intuitive-introduction-to-text-embeddings/\"\n })]\n }), \"\\n\", _jsx(_components.p, {\n children: \"Every time you enter an AI query, we algorithmically search the database for relevant chunks to use as reference based on embedding distance.\\nThis is all done independently from the LLM query itself. The LLM does not actively participate in this “chunk selection” step when deciding\\nwhat information to include in the context.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Afterwards, the top chunks get included as context and injected into the user’s original query. This is the basis of Retrieval Augmented\\nGeneration (RAG). Remember our example from earlier? When the user asks:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"What is GPT-trainer?\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"the most relevant chunk that will likely be pulled from our vector database is:\"\n }), \"\\n\", _jsx(CodeBlock, {\n filename: \"\",\n expandable: \"false\",\n children: _jsx(_components.pre, {\n className: \"language-plaintext\",\n children: _jsxs(_components.code, {\n children: [_jsx(_components.span, {\n className: \"\",\n children: \"GPT-trainer is a powerful no-code/low-code framework that allows you to build multi-agent chatbots with function-calling\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"capabilities using your own data. 
It is designed to be user-friendly and versatile, providing customization options and\"\n }), \"\\n\", _jsx(_components.span, {\n className: \"\",\n children: \"integration with popular platforms.\"\n }), \"\\n\"]\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"and the chatbot will now “know” the answer.\"\n }), \"\\n\", _jsx(_components.p, {\n children: \"Here is a good visual from AWS illustrating at a high level the series of RAG steps:\"\n }), \"\\n\", _jsx(Frame, {\n caption: \"https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-customize-rag.html\",\n children: _jsx(ZoomImage, {\n children: _jsx(\"img\", {\n src: \"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/images/help-1.png\"\n })\n })\n }), \"\\n\", _jsx(_components.p, {\n children: \"However, this approach also has limitations. The following types of queries generally work well:\"\n }), \"\\n\", _jsxs(_components.table, {\n children: [_jsx(_components.thead, {\n children: _jsxs(_components.tr, {\n children: [_jsx(_components.th, {\n children: \"Query Type\"\n }), _jsx(_components.th, {\n children: \"Definition\"\n }), _jsx(_components.th, {\n children: \"Example\"\n })]\n })\n }), _jsxs(_components.tbody, {\n children: [_jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Information Retrieval\"\n }), _jsx(_components.td, {\n children: \"Asking for specific information residing within one or more documents\"\n }), _jsx(_components.td, {\n children: \"”What is Paladin Max, Inc.’s PTO policy?”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Topic-centric Summarization\"\n }), _jsx(_components.td, {\n children: \"Aggregating information centered around a theme or topic\"\n }), _jsx(_components.td, {\n children: \"”Summarize the latest developments in generative AI”\"\n })]\n })]\n })]\n }), \"\\n\", _jsx(_components.p, {\n children: \"The following types of queries may not work as well:\"\n }), 
\"\\n\", _jsxs(_components.table, {\n children: [_jsx(_components.thead, {\n children: _jsxs(_components.tr, {\n children: [_jsx(_components.th, {\n children: \"Query Type\"\n }), _jsx(_components.th, {\n children: \"Definition\"\n }), _jsx(_components.th, {\n children: \"Example\"\n })]\n })\n }), _jsxs(_components.tbody, {\n children: [_jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Document Comparison\"\n }), _jsx(_components.td, {\n children: \"Comparing documents without an explicit criteria\"\n }), _jsx(_components.td, {\n children: \"”Find any inconsistencies across the arguments presented across my documents and list them.”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Counting or Math\"\n }), _jsx(_components.td, {\n children: \"Counting mentions or performing quantitative analysis based on document content\"\n }), _jsx(_components.td, {\n children: \"”How many times was John named across the contracts?”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Meta-Level Instructions\"\n }), _jsx(_components.td, {\n children: \"Directing the AI to trace document structure or reference content on specific sections or pages\"\n }), _jsx(_components.td, {\n children: \"”Identify key points from section 3 of business_report.pdf, covering pages 33-37.”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Library-wide Metadata Inquiries\"\n }), _jsx(_components.td, {\n children: \"Asking about properties or aggregate statistics of multiple documents inside the library\"\n }), _jsx(_components.td, {\n children: \"”How many documents talk about xx topic and list them in a table.”\"\n })]\n }), _jsxs(_components.tr, {\n children: [_jsx(_components.td, {\n children: \"Longform Text Generation\"\n }), _jsx(_components.td, {\n children: \"Writing extended text based on provided documents\"\n }), _jsx(_components.td, {\n children: 
\"”Write a 5000 words literature review.”\"\n })]\n })]\n })]\n }), \"\\n\", _jsx(_components.p, {\n children: \"If your use case demands it, our multi-Agent architecture and function-calling support may help address some of these complexities,\\nbut they require advanced configuration on your part. Please refer to our other related articles for best practices when deploying\\nmulti-agent chatbots with function-calling capabilities.\"\n }), \"\\n\", _jsxs(_components.p, {\n children: [\"There may be alternative ways for you to optimize your chatbot’s performance by improving the quality of your training data.\\nTo learn how, please check out \", _jsx(_components.a, {\n href: \"/best-practices\",\n children: \"Best practices for preparing training data\"\n }), \".\"]\n })]\n });\n}\nfunction MDXContent(props = {}) {\n const {wrapper: MDXLayout} = {\n ..._provideComponents(),\n ...props.components\n };\n return MDXLayout ? _jsx(MDXLayout, {\n ...props,\n children: _jsx(_createMdxContent, {\n ...props\n })\n }) : _createMdxContent(props);\n}\nreturn {\n default: MDXContent\n};\nfunction _missingMdxReference(id, component) {\n throw new Error(\"Expected \" + (component ? 
\"component\" : \"object\") + \" `\" + id + \"` to be defined: you likely forgot to import, pass, or provide it.\");\n}\n","frontmatter":{},"scope":{"mintConfig":{"$schema":"https://mintlify.com/schema.json","name":"GPT-trainer API","logo":{"light":"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/light.svg","dark":"https://mintlify.s3.us-west-1.amazonaws.com/paladinmaxinc/logo/dark.svg"},"favicon":"/logo/favicon.png","api":{"baseUrl":"https://app.gpt-trainer.com/api","auth":{"method":"bearer"}},"colors":{"primary":"#2E3F51","light":"#516F90","dark":"#0D001D","background":{"dark":"#111827"},"anchors":{"from":"#ED727B","to":"#F6B7BB"}},"topbarLinks":[{"url":"mailto:hello@gpt-trainer.com","name":"Support","_id":"676a0adaff1411a490c729a2"}],"navigation":[{"group":"Getting Started","pages":["introduction"]},{"group":"Guides","pages":["creating-first-chatbot","lead-collection","human-support-escalation","inbox-notifications","conversation-labeling","multi-agents-chatbot","fine-tuning-agent-intents","supervisor-overrides","byok-pricing-guide","working-with-tables","best-practices","help"]},{"group":"Function Calling","pages":["rag-from-external-data-provider"]},{"group":"API Usage Guides","pages":["api-reference/api-key-setup","api-reference/guide-00-chatbot-create","api-reference/guide-01-chat","api-reference/guide-02-source"]},{"group":"Authentication Webhook","pages":["user-identity"]},{"group":"Chatbots","pages":["api-reference/chatbots/properties-reference","api-reference/chatbots/create","api-reference/chatbots/update","api-reference/chatbots/fetch","api-reference/chatbots/fetch_multi","api-reference/chatbots/delete"]},{"group":"Agents","pages":["api-reference/agents/properties-reference","api-reference/agents/create","api-reference/agents/update","api-reference/agents/fetch_multi","api-reference/agents/delete"]},{"group":"Chatbot 
Sessions","pages":["api-reference/sessions/properties-reference","api-reference/sessions/create","api-reference/sessions/fetch","api-reference/sessions/fetch_multi","api-reference/sessions/delete","api-reference/sessions/delete_multi"]},{"group":"Session Messages","pages":["api-reference/messages/properties-reference","api-reference/messages/create","api-reference/messages/fetch_multi","api-reference/messages/delete","api-reference/messages/delete_multi"]},{"group":"Data Sources","pages":["api-reference/data-sources/properties-reference","api-reference/data-sources/create-file","api-reference/data-sources/create-qa","api-reference/data-sources/create-url","api-reference/data-sources/update","api-reference/data-sources/fetch_multi","api-reference/data-sources/retrain","api-reference/data-sources/delete","api-reference/data-sources/delete_multi"]},{"group":"Data Source Tags","pages":["api-reference/source-tags/create","api-reference/source-tags/fetch-multi","api-reference/source-tags/update","api-reference/source-tags/delete"]},{"group":"Tool Guides","pages":["tools/tools-intro"]},{"group":"Tools","pages":["tools/weekday"]},{"group":"Whitelabel Dashboard","pages":["whitelabel/whitelabel-intro","whitelabel/whitelabel-plans","whitelabel/whitelabel-users"]},{"group":"Integrations","pages":["whitelabel/whitelabel-zapier","whitelabel/whitelabel-make","whitelabel/whitelabel-meta"]}],"primaryTab":{"name":"Documentation"},"anchors":[{"name":"API References","url":"api-reference","icon":"code","_id":"676a0adaff1411a490c7299f"},{"name":"Tools","url":"tools","icon":"gear","_id":"676a0adaff1411a490c729a0"},{"name":"Whitelabel","url":"whitelabel","icon":"browser","_id":"676a0adaff1411a490c729a1"}],"repo":{"github":{"owner":"ks-collab","repo":"gpt-trainer-docs","contentDirectory":"","deployBranch":"main","isPrivate":false}}},"pageMetadata":{"title":"Why does my chatbot not answer 
correctly?","description":null,"href":"/help"}}}},"__N_SSG":true},"page":"/_sites/[subdomain]/[[...slug]]","query":{"subdomain":"guide.gpt-trainer.com","slug":["help"]},"buildId":"pChs_9tFT1YAEINLWWPhQ","isFallback":false,"isExperimentalCompile":false,"gsp":true,"scriptLoader":[]}</script>
<!-- Mirrored from guide.gpt-trainer.com/help by HTTrack Website Copier/3.x [XR&CO'2014], Tue, 07 Jan 2025 14:53:24 GMT -->
</html></body></html>