// --- Omnitool block-authoring TEMPLATE (documentation snippet) ---
// NOTE(review): several statements below are fused onto single lines by doc
// extraction, and some arguments are placeholder pseudo-syntax (e.g.
// `min value, max value`). This is a template to copy from, not compilable code.
import { OAIBaseComponent, type OmniComponentFormat, WorkerContext, OmniComponentMacroTypes } from "./path_to_file"const NS_OMNI = 'your_namespace'
// Create a component inside namespace NS_OMNI and describe it with chained setters.
let component = OAIBaseComponent.create(NS_OMNI, 'your_operationId')component
.fromScratch()
.set('description', 'Your description')
.set('title', 'Your title')
.set('category', 'Your category')
.setMethod('Your Method')
// Attribution / provenance metadata shown to users of the block.
.setMeta({
source: {
summary: 'A standard text input component with built-in URL fetching, enabling it to be connected to File (Image/Audio/Document) sockets',
authors: ['Mercenaries.ai Team'],
links: {
'Mercenaries.ai': 'https://mercenaries.ai'
}
}
})const partialComponentFormat: Partial < OmniComponentFormat > = {
displayNamespace: 'your_display_namespace',
displayOperationId: 'your_display_operationId',
apiNamespace: 'your_api_namespace',
apiOperationId: 'your_api_operationId',
// other properties can be added as needed
};
// Merge the partial format into the component, then declare a single input socket.
component.fromJSON(partialComponentFormat)const input = component.addInput(
component.createInput('input_name', 'input_type', 'input_x-type')
.set('title', 'Input title')
.set('description', 'Input description')
.setDefault('default value')
// NOTE(review): `min value, max value` is placeholder pseudo-syntax — pass real numbers.
.setConstraints(min value, max value)
.setChoice([])
.setRequired(true)
.allowMultiple(true) // enable an input to accept multiple connections.
.setControl({
controlType: 'alpine control type'
}) // Override the automatically selected control
.toOmniIO()
// NOTE(review): the next line fuses two alternative createInput() option forms from the
// docs ({array:true} and {array:true, customSettings:{...}}); `array_separator = '...'`
// should read `array_separator: '...'` in real code.
);component.createInput('input_name', 'input_type', 'input_x-type', {array:true}){array: true, customSettings: {array_separator = '-------' }};const inputs = [
{ name: 'text', type: 'string', description: 'A string', customSocket: 'text' },
// More input definitions...
{ name: 'usSocialSecurityNumber', type: 'boolean', default: true }
];
// Fold the declarative input table into the component, one socket per entry.
inputs.forEach(({ name, type, customSocket, description, default: defaultValue }) => {
component.addInput(
component.createInput(name, type, customSocket)
.set('description', description)
.setDefault(defaultValue)
.toOmniIO()
);
});let controlComposer = component.createControl('controlName')
component
.addControl(
controlComposer
.setRequired(true)
.setControlType('alpineControlType')
.toOmniControl()
);component.addControl(
component
.createControl("controlName")
.setRequired(true)
.setControlType("alpineControlType")
.toOmniControl()
);component.setMacro(OmniComponentMacroTypes.EXEC, (payload: any, ctx: WorkerContext) => {
// define your function here
})const YourComponent = component.toJSON()
export default YourComponentapp.blocks.addBlock(component)let patch= OAIBaseComponent
.createPatch(NS_ONMI, "text_replace")
.fromComponent(apiNamespace, apiOperationId)
....
.toJSON() //<-- OmnicomponentPatch
app.blocks.addPatch(patch)

// Declare upstream blocks this component requires to be present.
component.dependsOn(['automatic1111.interrogateapi_sdapi_v1_interrogate_post', 'automatic1111.simpleImage2Image']);

// --- Worked example: a PII-redaction block ---
import { OAIBaseComponent, WorkerContext, OmniComponentMacroTypes, Composer } from 'mercs_rete';

let component = OAIBaseComponent
  // FIX: was NS_ONMI — the namespace constant is NS_OMNI.
  // NOTE(review): "redactPll" looks like a typo for "redactPii", but it is a public
  // operation id — confirm before renaming.
  .create(NS_OMNI, "redactPll")
  .fromScratch()
  .set('title', 'PII Redaction')
  .set('category', 'Compliance')
  .set('description', 'Redact Personal Information from text')
  .setMethod('X-CUSTOM')
  .setMeta({
    // NOTE(review): other examples nest { source: { summary, links } }; here
    // `source: 'summary'` sits beside top-level summary/links — confirm intended shape.
    source: 'summary',
    summary: 'A PII redactor built using the solvvy/redact-pii library, implementing regex based PII reduction along with support for Google Cloud',
    links: {
      'What is PII?': 'https://www.cloudflare.com/learning/privacy/what-is-pii/',
      'redact-pii github': 'https://github.com/solvvy/redact-pii',
      // NOTE(review): "d1pr" looks like a garbled "dlp" — verify URL.
      'google cloud DLP': 'https://cloud.google.com/d1pr',
      'Solvy': 'https://www.solvvy.com/'
    }
  });

// Declarative input table, folded into the component below.
const inputs = [
  { name: 'text', type: 'string', description: 'A string', customSocket: 'text' },
  // More input definitions...
  { name: 'usSocialSecurityNumber', type: 'boolean', default: true }
];
inputs.forEach(({ name, type, customSocket, description, default: defaultValue }) => {
  component.addInput(
    component.createInput(name, type, customSocket)
      .set('description', description)
      .setDefault(defaultValue)
      .toOmniIO()
  );
});

supports:
- "blocks:v2"export default {
hooks: extensionHooks,
createComponents: () => {
blocks: [ /*array of OmniComponentFormat*/ ],
patches: [ /*array of OmniComponentPatch*/ ]
}
}yarn build
title: My First Extension # Human readable title, ideally under 20 characters
version: 0.0.1 # Semver compatible version string
description: A small omnitool extension doing stuff
author: omni@example.com # Author name or email
origin: https://github.com/user/repository.git # url to the extensions git repository. This is not required for local extensions not published
client:
addToWorkbench: false # if set to true, the extension will be added to the client's extension menu
dependencies: # Optional field allowing auto installation of yarn packages required by an extension, in the same format as package.json. This is experimental and will likely change
package: package@stable
packages:
known_extensions:
- title: 3D Texture Playground #Human readable title
id: omni-extension-texture-playground #Extension id
url: https://raw.githubusercontent.com/user/repository/branchname/extension.yaml #discovery url for the raw extension.yaml file describing the extensions
// Client-script example: registers a chat command that collects every image
// attached to the current chat and opens the Flipbook extension with them.
...function createScript()
{
return {
description: "Create a flipbook from the current chat",
// NOTE(review): title says "Flipboard" while the description says "flipbook" —
// confirm which spelling is intended.
title: "Create Flipboard",
// Invoked when the user runs the command; `args` is unused here.
exec(args){
let images = [];
// Find every image in chat
window.client.chat.state.messages.forEach((msg) => {
// NOTE(review): the guard checks `msg.attachments` but only reads `msg.images` —
// confirm whether the intended condition is simply `msg.images && msg.images.length > 0`.
if (msg.attachments && msg.images && msg.images.length > 0) {
images= images.concat(msg.images.map((img) => {
return img.url
}))
}
})
// Hand the collected URLs to the flipbook extension inside the workbench.
window.client.workbench.showExtension("omni-extension-flipbook", {images: images});
// One could also open the extension in a separate window:
// window.open(`./extensions/omni-extension-flipbook/?images=${encodeURIComponent(JSON.stringify(images))}`, '_blank', 'popup=1,toolbar=0,location=0,menubar=0');
// Confirm to the user via a system chat message carrying a follow-up command button.
window.client.sendSystemMessage(`Flipbook created, please check the extensions tab`, "text/markdown",
{
commands:
[
{
title: 'Show Flipbook',
id: 'toggleExtensions',
args: []
}
]
});
// Returning true signals the command completed successfully.
return true;
}
}
} const args = new URLSearchParams(location.search)
// Read parameters passed to this extension page via the `q` query parameter
// (`args` is the URLSearchParams created on the previous line).
const params = JSON.parse(args.get('q'))
if (params.images) {
// NOTE(review): `images` appears to be a reactive ref (e.g. Vue/Alpine) declared
// elsewhere in this page's script — confirm; only `.value` is assigned here.
images.value = params.images
}
const blocks = []
const extensionHooks = {}
const blockFactory = (FactoryFn) =>
{
blocks.map((c) => FactoryFn(c.schema, c.functions))
}
export default {hooks: extensionHooks, createComponents: blockFactory}
// ServerExtensionManager.ts
enum PERMITTED_EXTENSIONS_EVENTS
{
'pre_request_execute' = 'pre_request_execute', // runs each time a block is preparing to execute it's underlying API call allowing manipulation of the outgoing call. Arg
'post_request_execute' = 'post_request_execute', // runs each time a block has executed it's underlying API call, allowing manipulation of the result
'component:x-input' = 'component:x-input', // runs each time a chat input block processes it's payload. Args: (ctx, payload). Allows modification of payload
'jobs.job_started' = 'job_started', // runs each time a workflow / job has started executing
'jobs.job_finished' = 'job_finished', // runs each time a workflow / job has stopped executing
'jobs.pre_workflow_start' = 'job_pre_start' // runs each time a workflow is prepping execution, allowing it to be cancelled. Args: (workflow, workflow_context, ctx, actions). Set actions.cancel to true to abort execution, set actions.cancelReason to a string to return a specific cancellation reason to the user.
'registry.package_installed': 'package_installed' //{ omniPackage:string, installationId, orgId, customBaseUrl, duration: (end - start).toFixed() })
}const extensionHooks = {
// An basic interceptor that replaces any chat input containing something like an email addresses with a simple replacement
// This runs on any chat input
'component:x-input': function(ctx, payload)
{
if (payload.text != null && typeof(payload.text) === 'string')
{
payload.text = payload.text.replace(\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b, "example@example.com")
}
},
// Prevent any workflow from starting if the text input has the word clown
// This runs every time a workflow starts
'job_pre_start': function(ctx, workflow, workflow_context, actions)
{
console.log('job_pre_start pii scrubber', workflow_context.args)
if (workflow_context.args ?? workflow_context.args.text?.includes("clown"))
{
actions.cancel = true
actions.cancelReason = "N0 clowns allowed"
}
}
}
const MyCustomBlock =
{
schema: // <-- OpenAPI 3 Schema
{
// Namespace is automatically set to the extension id
"tags": ['default'],
"componentKey": "my-first-block", // <-- unique id within the namespace
//"apiKey": "my-first-block", // <-- optional, set to the 'parent' API if this is a normal rest component (not supported yet)
"operation": {
// operationId is automatically set to componentKey
"schema": {
"title": "My First Block", // <-- componentn title
"type": "object",
required:[],
"properties": {
"text": {
"title": "Some Text Input",
"type": "string", // <-- openAPI type
"x-type": "text", // <-- custom omnitool socket type if wanted
"default": "my default value",
"description": `My block description`
}
},
},
"responseTypes": {
"200": {
"schema": {
"required": [
"text"
],
"type": "string",
"properties": {
"text": {
"title": "My Output Text",
"type": "string",
},
},
},
"contentType": "application/json"
},
},
"method": "X-CUSTOM" // <-- This is important
},
patch: // <-- optional omnitool patch block
{
"title": "My Custom Component", /// <-- component
"category": "Test",
"summary": "Replaces Cars with Horses",
"meta":
{
"source":
{
"summary": "Replaces cars with horses",
links:
{
// list of string: string fields that are rendered as urls.
"research papaer": "https://arxiv.org..."
}
}
},
inputs: {...},
controls: {...}
outputs: {...}
}
},
functions: {
_exec: async (payload, ctx) => // <-- The _exec function is invoked when the component is run
{
if (payload.text)
{
payload.text = payload.text.replace("car", "horse" )
}
return payload // <-- Do not forget to return the altered payload
}
}
}
let blocks = [MyCustomBlock]
export default (FactoryFn: any) =>
{
return components.map((c) => FactoryFn(c.schema, c.functions))
}This provides instructions on how to execute Omnitool recipes through the REST API, enabling seamless integration into your system.
/generateJwtToken <action> <subject> <expires_in> [<recipe_id> (optional)]

/generateJwtToken exec Workflow 3600000

POST http://127.0.0.1:1688/api/v1/workflow/exec
Authorization: Bearer <token>

curl -X POST http://127.0.0.1:1688/api/v1/workflow/exec -H "Authorization: Bearer <token>"

{ "workflow": "<recipe_id>", "args": {} }

{
"workflow": "<recipe_id>",
"args": {
"text": "Example text",
"images": "http://example.com/image.png"
}
}
{ "result": { "status": "JOB_STARTED", "jobId": "bd999ed9-f2af-49cf-b8e1-fd9c3d2a5425", "sender": "omni" } }

curl -X GET "http://127.0.0.1:1688/api/v1/workflow/results?jobId=<job_id>" -H "Authorization: Bearer <token>"



openai

namespace: openai
api:
url: https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml
basePath: https://api.openai.com/v1
componentType: OAIComponent31
...namespace: getimg
api:
spec: ./api/getimg.yaml
basePath: https://api.getimg.ai
title: getimg

'http_basic' | 'http_bearer' | 'apiKey' | 'oauth2'

namespace: elevenlabs
api:
url: https://api.elevenlabs.io/openapi.json
basePath: https://api.elevenlabs.io
auth:
type: apiKey
requireKeys:
- id: xi-api-key
displayName: xi-api-key
type: string
in: header
title: elevenlabs

namespace: openai
api:
url: https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml
basePath: https://api.openai.com/v1
componentType: OAIComponent31
auth:
filter:
operationIds:
- createChatCompletion
- createCompletion
- createImage
- createModeration
- createTranscription
- createTranslation
- createImageVariation
- createImageEdit
- createEdit
- listModels
- createEmbedding
title: openai

category: Text-to-Speech
description: >-
Text to Speech Synthesis using the ElevenLabs API, supporting a variety of
monolingual (english) and multilingual voices.
meta:
source:
title: 'ElevenLabs: Text To Speech'
links:
Website: https://beta.elevenlabs.io/speech-synthesis
Subscription: https://beta.elevenlabs.io/subscription
API Reference: https://docs.elevenlabs.io/api-reference/quick-start/introduction
Documentation: https://docs.elevenlabs.io/welcome/introduction
Voice Lab: https://beta.elevenlabs.io/voice-lab
summary: >-
Text to Speech Synthesis using the ElevenLabs API, supporting a variety of
monolingual (english) and multilingual voices.
title: Text To Speech
apiNamespace: elevenlabs
apiOperationId: Text_to_speech_v1_text_to_speech__voice_id__post
displayNamespace: elevenlabs
displayOperationId: simpletts

scripts:
hideExcept:inputs:
- prompt
- temperature
- model
- top_p
- seed
- max_tokens
- instruction
- images
hideExcept:outputs:
- text

scripts:
transform:inputs:
transform:outputs:

scripts:
hoist:input

controls:
preview:
type: AlpineImageGalleryComponent
displays: output:image
opts:
readonly: true

placeholder?

image:
customSocket: image

socketOpts:
format: base64

socketOpts:
format: base64_withHeader

socketOpts:
array: true

allowMultiple: true

inputs:
'n':
title: Number of Images

model:
type: string
customSocket: text
choices:
block: getimg.listModels
cache: global
args:
pipeline: face-fix
family: enhancements
map:
title: name
value: id

messages:
scripts:
jsonata: >-
[{"role":"system", "content": $string(instruction) }, {"role":"user",
"content": $string(prompt) }]
delete:
- prompt
- instruction
hidden: true

securitySchemes

outputs:
_omni_result:
hidden: true

image_strength:
scripts:
jsonata: >
$exists(init_image_mode) and init_image_mode = "IMAGE_STRENGTH" ? image_strength : undefined
step_schedule_end:
scripts:
jsonata: >
$exists(init_image_mode) and init_image_mode = "STEP_SCHEDULE" ? step_schedule_end : undefined
step_schedule_start:
scripts:
jsonata: >
$exists(init_image_mode) and init_image_mode = "STEP_SCHEDULE" ? step_schedule_start : undefined

Accept:
hidden: true
default: application/json
Organization:
hidden: true

image:
customSocket: file 

