
Revert "chore: use node specify llm to auto generate prompt" (#6555)

tags/0.6.15
Joel 1 year ago
commit d726473c6d

+0 -10  web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx

 import cn from '@/utils/classnames'
 import { type PromptVariable } from '@/models/debug'
 import Tooltip from '@/app/components/base/tooltip'
-import type { CompletionParams } from '@/types/app'
 import { AppType } from '@/types/app'
 import { getNewVar, getVars } from '@/utils/var'
 import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
 const { eventEmitter } = useEventEmitterContextContext()
 const {
   modelConfig,
-  completionParams,
   dataSets,
   setModelConfig,
   setPrevPromptConfig,
 {showAutomatic && (
   <GetAutomaticResModal
     mode={mode as AppType}
-    model={
-      {
-        provider: modelConfig.provider,
-        name: modelConfig.model_id,
-        mode: modelConfig.mode,
-        completion_params: completionParams as CompletionParams,
-      }
-    }
     isShow={showAutomatic}
     onClose={showAutomaticFalse}
     onFinished={handleAutomaticRes}

+1 -11  web/app/components/app/configuration/config/automatic/get-automatic-res.tsx

 import Toast from '@/app/components/base/toast'
 import { generateRule } from '@/service/debug'
 import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
-import type { Model } from '@/types/app'
 import { AppType } from '@/types/app'
 import ConfigVar from '@/app/components/app/configuration/config-var'
 import OpeningStatement from '@/app/components/app/configuration/features/chat-group/opening-statement'

 export type IGetAutomaticResProps = {
   mode: AppType
-  model: Model
   isShow: boolean
   onClose: () => void
   onFinished: (res: AutomaticRes) => void

 const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
   mode,
-  model,
   isShow,
   onClose,
   isInLLMNode,
     return
   setLoadingTrue()
   try {
-    const { error, ...res } = await generateRule({
+    const res = await generateRule({
       instruction,
-      model_config: model,
     })
     setRes(res)
-    if (error) {
-      Toast.notify({
-        type: 'error',
-        message: error,
-      })
-    }
   }
   finally {
     setLoadingFalse()
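
Read as a whole, the generation handler after this revert boils down to the sketch below. generateRule and AutomaticRes come from '@/service/debug' (see the imports above); the wrapper function and its setter parameters are illustrative stand-ins for the component's hooks, and error reporting is assumed to fall back to the service layer once the local `error` branch is gone.

import { generateRule } from '@/service/debug'
import type { AutomaticRes } from '@/service/debug'

// Illustrative wrapper around the reverted call site (not the component itself).
async function runGenerate(
  instruction: string,
  setLoading: (on: boolean) => void, // stands in for setLoadingTrue/setLoadingFalse
  setRes: (res: AutomaticRes) => void,
) {
  setLoading(true)
  try {
    // After the revert the request body is just the instruction again:
    // no `model_config`, and no locally destructured `error` field.
    const res = await generateRule({ instruction })
    setRes(res)
  }
  finally {
    setLoading(false)
  }
}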

+1 -4  web/app/components/workflow/nodes/_base/components/prompt/editor.tsx

 import { useBoolean } from 'ahooks'
 import { BlockEnum, EditionType } from '../../../../types'
 import type {
-  ModelConfig,
   Node,
   NodeOutPutVar,
   Variable,
   availableNodes?: Node[]
   isSupportPromptGenerator?: boolean
   onGenerated?: (prompt: string) => void
-  modelConfig?: ModelConfig
   // for jinja
   isSupportJinja?: boolean
   editionType?: EditionType
   varList = [],
   handleAddVariable,
   onGenerated,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const { eventEmitter } = useEventEmitterContextContext()
   <div className='flex items-center'>
     <div className='leading-[18px] text-xs font-medium text-gray-500'>{value?.length || 0}</div>
     {isSupportPromptGenerator && (
-      <PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} modelConfig={modelConfig} />
+      <PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} />
     )}

     <div className='w-px h-3 ml-2 mr-2 bg-gray-200'></div>

+1 -4  web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx

 import { uniqueId } from 'lodash-es'
 import { useTranslation } from 'react-i18next'
 import { RiQuestionLine } from '@remixicon/react'
-import type { ModelConfig, PromptItem, Variable } from '../../../types'
+import type { PromptItem, Variable } from '../../../types'
 import { EditionType } from '../../../types'
 import { useWorkflowStore } from '../../../store'
 import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
   availableNodes: any
   varList: Variable[]
   handleAddVariable: (payload: any) => void
-  modelConfig?: ModelConfig
 }

 const roleOptions = [
   availableNodes,
   varList,
   handleAddVariable,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
   availableNodes={availableNodes}
   isSupportPromptGenerator={payload.role === PromptRole.system}
   onGenerated={handleGenerated}
-  modelConfig={modelConfig}
   isSupportJinja
   editionType={payload.edition_type}
   onEditionTypeChange={onEditionTypeChange}

+1 -5  web/app/components/workflow/nodes/llm/components/config-prompt.tsx

 import produce from 'immer'
 import { ReactSortable } from 'react-sortablejs'
 import { v4 as uuid4 } from 'uuid'
-import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
+import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
 import { EditionType, PromptRole } from '../../../types'
 import useAvailableVarList from '../../_base/hooks/use-available-var-list'
 import { useWorkflowStore } from '../../../store'
 }
   varList?: Variable[]
   handleAddVariable: (payload: any) => void
-  modelConfig: ModelConfig
 }

 const ConfigPrompt: FC<Props> = ({
   hasSetBlockStatus,
   varList = [],
   handleAddVariable,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
     availableNodes={availableNodesWithParent}
     varList={varList}
     handleAddVariable={handleAddVariable}
-    modelConfig={modelConfig}
   />
 </div>
 )
     onEditionTypeChange={handleCompletionEditionTypeChange}
     handleAddVariable={handleAddVariable}
     onGenerated={handleGenerated}
-    modelConfig={modelConfig}
   />
 </div>
 )}

+0 -6  web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx

 import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
 import { AppType } from '@/types/app'
 import type { AutomaticRes } from '@/service/debug'
-import type { ModelConfig } from '@/app/components/workflow/types'
-import type { Model } from '@/types/app'

 type Props = {
   className?: string
   onGenerated?: (prompt: string) => void
-  modelConfig?: ModelConfig
 }

 const PromptGeneratorBtn: FC<Props> = ({
   className,
   onGenerated,
-  modelConfig,
 }) => {
   const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
   const handleAutomaticRes = useCallback((res: AutomaticRes) => {
     isShow={showAutomatic}
     onClose={showAutomaticFalse}
     onFinished={handleAutomaticRes}
-    model={modelConfig as Model}
     isInLLMNode
   />
 )}

+0 -1  web/app/components/workflow/nodes/llm/panel.tsx

   hasSetBlockStatus={hasSetBlockStatus}
   varList={inputs.prompt_config?.jinja2_variables || []}
   handleAddVariable={handleAddVariable}
-  modelConfig={model}
 />
 )}



+0 -1  web/service/debug.ts

   prompt: string
   variables: string[]
   opening_statement: string
-  error?: string
 }

 export const sendChatMessage = async (appId: string, body: Record<string, any>, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {
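
Putting the hunk together, the response type returns to its pre-#6555 shape. A minimal sketch, assuming the declaration is the exported AutomaticRes type imported elsewhere in this diff and that the hunk's context shows all of its fields:

// Sketch of web/service/debug.ts after the revert (fields taken from the hunk above).
export type AutomaticRes = {
  prompt: string
  variables: string[]
  opening_statement: string
}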

+53 -56  web/types/app.ts

 react = 'react',
 }


-export type CompletionParams = {
-  /** Maximum number of tokens in the answer message returned by Completion */
-  max_tokens: number
-  /**
-   * A number between 0 and 2.
-   * The larger the number, the more random the result;
-   * otherwise, the more deterministic.
-   * When in use, choose either `temperature` or `top_p`.
-   * Default is 1.
-   */
-  temperature: number
-  /**
-   * The proportion of probability mass to sample from,
-   * e.g., 0.1 means sampling only from the top 10% of the probability mass.
-   * When in use, choose either `temperature` or `top_p`.
-   * Default is 1.
-   */
-  top_p: number
-  /** When enabled, the prompt content is prepended to the completion text in the response. */
-  echo: boolean
-  /**
-   * Up to 4 sequences; generation stops before producing any of the texts in `stop`.
-   * Suitable for use in chat mode.
-   * For example, specify "Q" and "A" and provide some Q&A examples as context,
-   * and the model will answer in Q&A format, stopping before generating the next "Q".
-   */
-  stop: string[]
-  /**
-   * A number between -2.0 and 2.0.
-   * The larger the value, the less the model repeats topics and the more it introduces new ones.
-   */
-  presence_penalty: number
-  /**
-   * A number between -2.0 and 2.0.
-   * A lower value makes the model more likely to repeat the same expressions.
-   * The difference between `frequency_penalty` and `presence_penalty`
-   * is that `frequency_penalty` scales with how often a token has already appeared,
-   * while `presence_penalty` applies once a token has appeared at all.
-   */
-  frequency_penalty: number
-}
 /**
  * Model configuration. The backend type.
  */
-export type Model = {
-  /** LLM provider, e.g., OPENAI */
-  provider: string
-  /** Model name, e.g., gpt-3.5-turbo */
-  name: string
-  mode: ModelModeType
-  /** Default Completion call parameters */
-  completion_params: CompletionParams
-}

 export type ModelConfig = {
   opening_statement: string
   suggested_questions?: string[]
   strategy?: AgentStrategy
   tools: ToolItem[]
 }
-  model: Model
+  model: {
+    /** LLM provider, e.g., OPENAI */
+    provider: string
+    /** Model name, e.g., gpt-3.5-turbo */
+    name: string
+    mode: ModelModeType
+    /** Default Completion call parameters */
+    completion_params: {
+      /** Maximum number of tokens in the answer message returned by Completion */
+      max_tokens: number
+      /**
+       * A number between 0 and 2.
+       * The larger the number, the more random the result;
+       * otherwise, the more deterministic.
+       * When in use, choose either `temperature` or `top_p`.
+       * Default is 1.
+       */
+      temperature: number
+      /**
+       * The proportion of probability mass to sample from,
+       * e.g., 0.1 means sampling only from the top 10% of the probability mass.
+       * When in use, choose either `temperature` or `top_p`.
+       * Default is 1.
+       */
+      top_p: number
+      /** When enabled, the prompt content is prepended to the completion text in the response. */
+      echo: boolean
+      /**
+       * Up to 4 sequences; generation stops before producing any of the texts in `stop`.
+       * Suitable for use in chat mode.
+       * For example, specify "Q" and "A" and provide some Q&A examples as context,
+       * and the model will answer in Q&A format, stopping before generating the next "Q".
+       */
+      stop: string[]
+      /**
+       * A number between -2.0 and 2.0.
+       * The larger the value, the less the model repeats topics and the more it introduces new ones.
+       */
+      presence_penalty: number
+      /**
+       * A number between -2.0 and 2.0.
+       * A lower value makes the model more likely to repeat the same expressions.
+       * The difference between `frequency_penalty` and `presence_penalty`
+       * is that `frequency_penalty` scales with how often a token has already appeared,
+       * while `presence_penalty` applies once a token has appeared at all.
+       */
+      frequency_penalty: number
+    }
+  }
   dataset_configs: DatasetConfigs
   file_upload?: {
     image: VisionSettings
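
As a reading aid for the re-inlined type, here is a hypothetical value that satisfies ModelConfig['model']. The provider, model name, and parameter values are illustrative, not project defaults, and ModelModeType.completion assumes the enum exposes such a member:

import { ModelModeType } from '@/types/app'
import type { ModelConfig } from '@/types/app'

// Hypothetical example; every concrete value below is made up for illustration.
const exampleModel: ModelConfig['model'] = {
  provider: 'OPENAI',
  name: 'gpt-3.5-turbo',
  mode: ModelModeType.completion, // assumption: the enum has a `completion` member
  completion_params: {
    max_tokens: 512,
    temperature: 1, // pick either temperature or top_p, not both
    top_p: 1,
    echo: false,
    stop: ['Q:'], // at most 4 stop sequences
    presence_penalty: 0,
    frequency_penalty: 0,
  },
}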
