 - import type { AnnotationReplyConfig, ChatPromptConfig, CompletionPromptConfig, DatasetConfigs, PromptMode } from '@/models/debug'
 - import type { ExternalDataTool } from '@/models/common'
 - export enum ProviderType {
 -   openai = 'openai',
 -   anthropic = 'anthropic',
 -   azure_openai = 'azure_openai',
 -   replicate = 'replicate',
 -   huggingface_hub = 'huggingface_hub',
 -   minimax = 'minimax',
 -   tongyi = 'tongyi',
 -   spark = 'spark',
 - }
 - 
 - export enum AppType {
 -   'chat' = 'chat',
 -   'completion' = 'completion',
 - }
 - 
 - export enum ModelModeType {
 -   'chat' = 'chat',
 -   'completion' = 'completion',
 -   'unset' = '',
 - }
 - 
 - export enum RETRIEVE_TYPE {
 -   oneWay = 'single',
 -   multiWay = 'multiple',
 - }
 - 
 - export enum RETRIEVE_METHOD {
 -   semantic = 'semantic_search',
 -   fullText = 'full_text_search',
 -   hybrid = 'hybrid_search',
 -   invertedIndex = 'invertedIndex',
 - }
 - 
 - export type VariableInput = {
 -   key: string
 -   name: string
 -   value: string
 - }
 - 
 - /**
 -  * App modes
 -  */
 - export const AppModes = ['completion', 'chat'] as const
 - export type AppMode = typeof AppModes[number]
 - 
 - /**
 -  * Variable type
 -  */
 - export const VariableTypes = ['string', 'number', 'select'] as const
 - export type VariableType = typeof VariableTypes[number]
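 - 
 - // Illustrative sketch (not part of the original file): because `VariableTypes` is
 - // declared `as const`, a runtime membership check doubles as a type guard. The
 - // helper name is an assumption for demonstration only.
 - const isVariableType = (value: string): value is VariableType =>
 -   (VariableTypes as readonly string[]).includes(value)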
 - 
 - /**
 -  * Prompt variable parameter
 -  */
 - export type PromptVariable = {
 -   /** Variable key */
 -   key: string
 -   /** Variable name */
 -   name: string
 -   /** Type */
 -   type: VariableType
 -   required: boolean
 -   /** Enumeration of single-selection drop-down values */
 -   options?: string[]
 -   max_length?: number
 - }
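 - 
 - // Illustrative example (not part of the original file): a `select`-type prompt
 - // variable; key, name and options are made-up samples.
 - const examplePromptVariable: PromptVariable = {
 -   key: 'tone',
 -   name: 'Tone',
 -   type: 'select',
 -   required: true,
 -   options: ['formal', 'casual'],
 - }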
 - 
 - export type TextTypeFormItem = {
 -   label: string
 -   variable: string
 -   required: boolean
 -   max_length: number
 - }
 - 
 - export type SelectTypeFormItem = {
 -   label: string
 -   variable: string
 -   required: boolean
 -   options: string[]
 - }
 - /**
 -  * User Input Form Item
 -  */
 - export type UserInputFormItem = {
 -   'text-input': TextTypeFormItem
 - } | {
 -   'select': SelectTypeFormItem
 - }
 - 
 - export type ToolItem = {
 -   dataset: {
 -     enabled: boolean
 -     id: string
 -   }
 - } | {
 -   'sensitive-word-avoidance': {
 -     enabled: boolean
 -     words: string[]
 -     canned_response: string
 -   }
 - }
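 - 
 - // Illustrative examples (not part of the original file): each union member is an
 - // object keyed by its kind; all values are made-up samples.
 - const exampleFormItem: UserInputFormItem = {
 -   'text-input': { label: 'Name', variable: 'name', required: true, max_length: 48 },
 - }
 - const exampleToolItem: ToolItem = {
 -   dataset: { enabled: true, id: 'sample-dataset-id' },
 - }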
 - 
 - /**
 -  * Model configuration. The backend type.
 -  */
 - export type ModelConfig = {
 -   opening_statement: string
 -   pre_prompt: string
 -   prompt_type: PromptMode
 -   chat_prompt_config: ChatPromptConfig | {}
 -   completion_prompt_config: CompletionPromptConfig | {}
 -   user_input_form: UserInputFormItem[]
 -   dataset_query_variable?: string
 -   more_like_this: {
 -     enabled: boolean
 -   }
 -   suggested_questions_after_answer: {
 -     enabled: boolean
 -   }
 -   speech_to_text: {
 -     enabled: boolean
 -   }
 -   retriever_resource: {
 -     enabled: boolean
 -   }
 -   sensitive_word_avoidance: {
 -     enabled: boolean
 -   }
 -   external_data_tools: ExternalDataTool[]
 -   annotation_reply?: AnnotationReplyConfig
 -   agent_mode: {
 -     enabled: boolean
 -     tools: ToolItem[]
 -   }
 -   model: {
 -     /** LLM provider, e.g., OPENAI */
 -     provider: string
 -     /** Model name, e.g., gpt-3.5-turbo */
 -     name: string
 -     mode: ModelModeType
 -     /** Default Completion call parameters */
 -     completion_params: {
 -       /** Maximum number of tokens in the answer message returned by Completion */
 -       max_tokens: number
 -       /**
 -        * A number between 0 and 2.
 -        * The larger the number, the more random the result;
 -        * otherwise, the more deterministic.
 -        * When in use, choose either `temperature` or `top_p`.
 -        * Default is 1.
 -        */
 -       temperature: number
 -       /**
 -        * Nucleus sampling: only the tokens within the top `top_p` probability mass
 -        * are considered, e.g., 0.1 means sampling only from the top 10% of the
 -        * probability mass. Lower values make the output more deterministic.
 -        * When in use, choose either `temperature` or `top_p`.
 -        * Default is 1.
 -        */
 -       top_p: number
 -       /** When enabled, the prompt content is prepended to the completion text in the response. */
 -       echo: boolean
 -       /**
 -        * Specify up to 4 sequences; generation stops automatically before the specified text is produced.
 -        * Suitable for use in chat mode.
 -        * For example, specify "Q" and "A" and provide some Q&A examples as context,
 -        * and the model will answer in Q&A format and stop generating before the next "Q" or "A".
 -        */
 -       stop: string[]
 -       /**
 -        * A number between -2.0 and 2.0.
 -        * The larger the value, the less the model will repeat topics and the more it will provide new topics.
 -        */
 -       presence_penalty: number
 -       /**
 -        * A number between -2.0 and 2.0.
 -        * Lower values make the model more likely to repeat the same expressions.
 -        * The difference between `frequency_penalty` and `presence_penalty`
 -        * is that `frequency_penalty` scales the penalty with how often a token has already
 -        * appeared in the generated text, while `presence_penalty` applies a flat penalty
 -        * to any token that has appeared at all.
 -        */
 -       frequency_penalty: number
 -     }
 -   }
 -   dataset_configs: DatasetConfigs
 -   file_upload?: {
 -     image: VisionSettings
 -   }
 -   files?: VisionFile[]
 - }
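 - 
 - // Illustrative example (not part of the original file): completion parameters as
 - // described by the comments above. Values are made-up samples; `temperature` and
 - // `top_p` both default to 1 and are not meant to be tuned together.
 - const exampleCompletionParams: ModelConfig['model']['completion_params'] = {
 -   max_tokens: 512,
 -   temperature: 1,
 -   top_p: 1,
 -   echo: false,
 -   stop: ['Q', 'A'],
 -   presence_penalty: 0,
 -   frequency_penalty: 0,
 - }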
 - 
 - export const LanguagesSupported = ['zh-Hans', 'en-US'] as const
 - export type Language = typeof LanguagesSupported[number]
 - 
 - /**
 -  * Web Application Configuration
 -  */
 - export type SiteConfig = {
 -   /** Application URL Identifier: `http://dify.app/{access_token}` */
 -   access_token: string
 -   /** Public Title */
 -   title: string
 -   /** Application description shown in the client */
 -   description: string
 -   /** Author */
 -   author: string
 -   /** User Support Email Address */
 -   support_email: string
 -   /**
 -    * Default Language, e.g. zh-Hans, en-US
 -    * Use standard RFC 4646, see https://www.ruanyifeng.com/blog/2008/02/codes_for_language_names.html
 -    */
 -   default_language: Language
 -   /**  Custom Domain */
 -   customize_domain: string
 -   /** Theme */
 -   theme: string
 -   /** Custom token strategy: whether end users may provide their own OpenAI API key */
 -   customize_token_strategy: 'must' | 'allow' | 'not_allow'
 -   /** Is Prompt Public */
 -   prompt_public: boolean
 -   /** Web API and APP Base Domain Name */
 -   app_base_url: string
 -   /** Copyright */
 -   copyright: string
 -   /** Privacy Policy */
 -   privacy_policy: string
 - 
 -   icon: string
 -   icon_background: string
 - }
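 - 
 - // Illustrative example (not part of the original file): a minimal web app
 - // configuration; every value is a made-up sample.
 - const exampleSiteConfig: SiteConfig = {
 -   access_token: 'sample-access-token',
 -   title: 'Demo App',
 -   description: 'A sample chat application.',
 -   author: 'Example Author',
 -   support_email: 'support@example.com',
 -   default_language: 'en-US',
 -   customize_domain: '',
 -   theme: 'default',
 -   customize_token_strategy: 'not_allow',
 -   prompt_public: false,
 -   app_base_url: 'https://example.com',
 -   copyright: '',
 -   privacy_policy: '',
 -   icon: '🤖',
 -   icon_background: '#FFEAD5',
 - }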
 - 
 - /**
 -  * App
 -  */
 - export type App = {
 -   /** App ID */
 -   id: string
 -   /** Name */
 -   name: string
 - 
 -   /** Icon */
 -   icon: string
 -   /** Icon Background */
 -   icon_background: string
 - 
 -   /** Mode */
 -   mode: AppMode
 -   /** Enable web app */
 -   enable_site: boolean
 -   /** Enable web API */
 -   enable_api: boolean
 -   /** API requests per minute, default is 60 */
 -   api_rpm: number
 -   /** API requests per hour, default is 3600 */
 -   api_rph: number
 -   /** Whether it's a demo app */
 -   is_demo: boolean
 -   /** Model configuration */
 -   model_config: ModelConfig
 -   app_model_config: ModelConfig
 -   /** Timestamp of creation */
 -   created_at: number
 -   /** Web Application Configuration */
 -   site: SiteConfig
 -   /** API base URL */
 -   api_base_url: string
 - }
 - 
 - /**
 -  * App Template
 -  */
 - export type AppTemplate = {
 -   /** Name */
 -   name: string
 -   /** Description */
 -   description: string
 -   /** Mode */
 -   mode: AppMode
 -   /** Model */
 -   model_config: ModelConfig
 - }
 - 
 - export enum Resolution {
 -   low = 'low',
 -   high = 'high',
 - }
 - 
 - export enum TransferMethod {
 -   all = 'all',
 -   local_file = 'local_file',
 -   remote_url = 'remote_url',
 - }
 - 
 - export const ALLOW_FILE_EXTENSIONS = ['png', 'jpg', 'jpeg', 'webp', 'gif']
 - 
 - export type VisionSettings = {
 -   enabled: boolean
 -   number_limits: number
 -   detail: Resolution
 -   transfer_methods: TransferMethod[]
 -   image_file_size_limit?: number | string
 - }
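 - 
 - // Illustrative example (not part of the original file): vision/file upload settings
 - // limited to three images; all values are made-up samples.
 - const exampleVisionSettings: VisionSettings = {
 -   enabled: true,
 -   number_limits: 3,
 -   detail: Resolution.high,
 -   transfer_methods: [TransferMethod.local_file, TransferMethod.remote_url],
 -   image_file_size_limit: 10,
 - }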
 - 
 - export type ImageFile = {
 -   type: TransferMethod
 -   _id: string
 -   fileId: string
 -   file?: File
 -   progress: number
 -   url: string
 -   base64Url?: string
 -   deleted?: boolean
 - }
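 - 
 - // Illustrative example (not part of the original file): a local image upload in
 - // progress. Treating `progress` as a 0-100 percentage is an assumption; IDs and
 - // URLs are made-up samples.
 - const exampleImage: ImageFile = {
 -   type: TransferMethod.local_file,
 -   _id: 'sample-local-id',
 -   fileId: '',
 -   progress: 40,
 -   url: '',
 -   base64Url: 'data:image/png;base64,SAMPLE',
 - }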
 - 
 - export type VisionFile = {
 -   id?: string
 -   type: string
 -   transfer_method: TransferMethod
 -   url: string
 -   upload_file_id: string
 - }
 - 
 - export type RetrievalConfig = {
 -   search_method: RETRIEVE_METHOD
 -   reranking_enable: boolean
 -   reranking_model: {
 -     reranking_provider_name: string
 -     reranking_model_name: string
 -   }
 -   top_k: number
 -   score_threshold_enabled: boolean
 -   score_threshold: number
 - }
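 - 
 - // Illustrative example (not part of the original file): a hybrid-search retrieval
 - // configuration with reranking enabled; provider and model names are made-up samples.
 - const exampleRetrievalConfig: RetrievalConfig = {
 -   search_method: RETRIEVE_METHOD.hybrid,
 -   reranking_enable: true,
 -   reranking_model: {
 -     reranking_provider_name: 'sample-provider',
 -     reranking_model_name: 'sample-reranker',
 -   },
 -   top_k: 3,
 -   score_threshold_enabled: false,
 -   score_threshold: 0,
 - }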