{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "$id": "https://catalog.lintel.tools/schemas/schemastore/proactions-ai-kit-configuration-for-templates/_shared/latest--partial-step.schema.json",
  "title": "ProActions AI-Kit Step Configuration Schema",
  "description": "A single step in a workflow. The 'step' property determines the type and schema to use.",
  "x-lintel": {
    "source": "https://raw.githubusercontent.com/em-al-wi/proactions-schema/main/schema/partial-step.schema.json",
    "sourceSha256": "fede0d8411bbfc6f6f831553203a9ea0460c7e54b78bcea84537af036f3ec2c3"
  },
  "type": "object",
  "oneOf": [
    {
      "$ref": "#/$defs/BREAK"
    },
    {
      "$ref": "#/$defs/DEBUG"
    },
    {
      "$ref": "#/$defs/FOR"
    },
    {
      "$ref": "#/$defs/IF"
    },
    {
      "$ref": "#/$defs/END"
    },
    {
      "$ref": "#/$defs/PARALLEL"
    },
    {
      "$ref": "#/$defs/SET"
    },
    {
      "$ref": "#/$defs/SWITCH"
    },
    {
      "$ref": "#/$defs/CALL_TEMPLATE"
    },
    {
      "$ref": "#/$defs/TRY"
    },
    {
      "$ref": "#/$defs/WHILE"
    },
    {
      "$ref": "#/$defs/GET_TEXT_CONTENT"
    },
    {
      "$ref": "#/$defs/GET_XML_CONTENT"
    },
    {
      "$ref": "#/$defs/SELECTED_OBJECT"
    },
    {
      "$ref": "#/$defs/SET_METADATA"
    },
    {
      "$ref": "#/$defs/SHOW_RESPONSE"
    },
    {
      "$ref": "#/$defs/SHOW_NOTIFICATION"
    },
    {
      "$ref": "#/$defs/INSERT_TEXT"
    },
    {
      "$ref": "#/$defs/INSERT_XML"
    },
    {
      "$ref": "#/$defs/REPLACE_TEXT"
    },
    {
      "$ref": "#/$defs/REPLACE_XML"
    },
    {
      "$ref": "#/$defs/INSERT_LIST"
    },
    {
      "$ref": "#/$defs/SANITIZE"
    },
    {
      "$ref": "#/$defs/SCRIPTING"
    },
    {
      "$ref": "#/$defs/TO_LIST"
    },
    {
      "$ref": "#/$defs/PARSE_JSON"
    },
    {
      "$ref": "#/$defs/MARKDOWN_TO_HTML"
    },
    {
      "$ref": "#/$defs/BASE64_TO_BLOB"
    },
    {
      "$ref": "#/$defs/BRIGHTER_AI"
    },
    {
      "$ref": "#/$defs/DEEPL_TRANSLATE"
    },
    {
      "$ref": "#/$defs/DEEPL_WRITE"
    },
    {
      "$ref": "#/$defs/EDAPI_OBJECT_CONTENT"
    },
    {
      "$ref": "#/$defs/UPLOAD_IMAGE"
    },
    {
      "$ref": "#/$defs/UPDATE_BINARY_CONTENT"
    },
    {
      "$ref": "#/$defs/UPLOAD"
    },
    {
      "$ref": "#/$defs/ELEVENLABS_TTS"
    },
    {
      "$ref": "#/$defs/ELEVENLABS_STT"
    },
    {
      "$ref": "#/$defs/HUB_YOUTUBE_AUTH_INIT"
    },
    {
      "$ref": "#/$defs/HUB_YOUTUBE_AUTH_STATUS"
    },
    {
      "$ref": "#/$defs/HUB_YOUTUBE_AUTH_LOGOUT"
    },
    {
      "$ref": "#/$defs/HUB_YOUTUBE_UPLOAD"
    },
    {
      "$ref": "#/$defs/HUB_CONTENT_EXTRACTION"
    },
    {
      "$ref": "#/$defs/OPENAI_ASSISTANT"
    },
    {
      "$ref": "#/$defs/OPENAI_THREAD"
    },
    {
      "$ref": "#/$defs/OPENAI_DELETE_THREAD"
    },
    {
      "$ref": "#/$defs/OPENAI_THREAD_FILES"
    },
    {
      "$ref": "#/$defs/OPENAI_THREAD_MESSAGE"
    },
    {
      "$ref": "#/$defs/OPENAI_COMPLETION"
    },
    {
      "$ref": "#/$defs/HUB_COMPLETION"
    },
    {
      "$ref": "#/$defs/AZURE_OPENAI_COMPLETION"
    },
    {
      "$ref": "#/$defs/OPENAI_IMAGE_GENERATION"
    },
    {
      "$ref": "#/$defs/HUB_IMAGE_GENERATION"
    },
    {
      "$ref": "#/$defs/AZURE_OPENAI_IMAGE_GENERATION"
    },
    {
      "$ref": "#/$defs/OPENAI_SPEECH"
    },
    {
      "$ref": "#/$defs/HUB_SPEECH"
    },
    {
      "$ref": "#/$defs/AZURE_OPENAI_SPEECH"
    },
    {
      "$ref": "#/$defs/OPENAI_TRANSCRIPTION"
    },
    {
      "$ref": "#/$defs/HUB_TRANSCRIPTION"
    },
    {
      "$ref": "#/$defs/AZURE_OPENAI_TRANSCRIPTION"
    },
    {
      "$ref": "#/$defs/REST"
    },
    {
      "$ref": "#/$defs/STABILITY_AI_UPSCALE"
    },
    {
      "$ref": "#/$defs/STABILITY_AI_OUTPAINT"
    },
    {
      "$ref": "#/$defs/STABILITY_AI_SEARCH_AND_REPLACE"
    },
    {
      "$ref": "#/$defs/STABILITY_AI_SEARCH_AND_RECOLOR"
    },
    {
      "$ref": "#/$defs/READ_CLIPBOARD"
    },
    {
      "$ref": "#/$defs/WRITE_CLIPBOARD"
    },
    {
      "$ref": "#/$defs/DOWNLOAD"
    },
    {
      "$ref": "#/$defs/FILE_UPLOAD"
    },
    {
      "$ref": "#/$defs/PROMPT"
    },
    {
      "$ref": "#/$defs/FORM"
    },
    {
      "$ref": "#/$defs/PLAY_AUDIO"
    },
    {
      "$ref": "#/$defs/USER_SELECT"
    },
    {
      "$ref": "#/$defs/IMAGE_PICKER"
    },
    {
      "$ref": "#/$defs/SHOW_PROGRESS"
    },
    {
      "$ref": "#/$defs/UPDATE_PROGRESS"
    },
    {
      "$ref": "#/$defs/HIDE_PROGRESS"
    },
    {
      "$ref": "#/$defs/SLEEP"
    },
    {
      "$ref": "#/$defs/CLEAR_SELECTION"
    },
    {
      "$ref": "#/$defs/CHANGE_VIEW_SIZE"
    }
  ],
  "x-generatedAt": "2025-11-10T10:15:26.968Z",
  "$defs": {
    "BREAK": {
      "type": "object",
      "description": "Breaks out of the current loop (e.g., WHILE or FOR) based on the given condition.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "BREAK"
          ],
          "description": "Breaks out of the current loop (e.g., WHILE or FOR) based on the given condition."
        },
        "condition": {
          "type": "string",
          "description": "The condition to evaluate. Loop will be broken when condition evaluates to true. If not provided, breaks unconditionally."
        }
      },
      "additionalProperties": false
    },
    "DEBUG": {
      "type": "object",
      "description": "Logs debug information about the current flow context and step configuration. Optionally triggers a debugger breakpoint.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "DEBUG"
          ],
          "description": "Logs debug information about the current flow context and step configuration. Optionally triggers a debugger breakpoint."
        },
        "id": {
          "type": "string",
          "description": "Optional identifier for the debug step, included in the log message."
        },
        "omitDebugger": {
          "type": "boolean",
          "description": "If true, skips triggering the debugger breakpoint.",
          "default": false
        }
      },
      "additionalProperties": false
    },
    "FOR": {
      "type": "object",
      "description": "Executes a set of steps for a fixed number of iterations (numeric) or iterates over an array (items).",
      "required": [
        "step",
        "var"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "FOR"
          ],
          "description": "Executes a set of steps for a fixed number of iterations (numeric) or iterates over an array (items)."
        },
        "var": {
          "type": "string",
          "description": "The name of the variable to set with the current iteration value."
        },
        "start": {
          "type": "number",
          "description": "The starting value of the loop variable (numeric)."
        },
        "end": {
          "type": "number",
          "description": "The ending value of the loop variable (numeric, inclusive)."
        },
        "by": {
          "type": "number",
          "description": "Step increment for numeric loops (can be negative). If omitted, inferred from start/end (1 or -1)."
        },
        "items": {
          "type": "string",
          "description": "An array to iterate over (e.g., an expression producing an array). If provided, numeric start/end are ignored."
        },
        "do": {
          "type": "array",
          "description": "Steps to execute in each iteration of the loop.",
          "items": {
            "$ref": "#"
          }
        }
      },
      "additionalProperties": false
    },
    "IF": {
      "type": "object",
      "description": "Execute a conditional branch based on a condition",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "IF"
          ],
          "description": "Execute a conditional branch based on a condition"
        },
        "condition": {
          "type": "string",
          "description": "The condition to evaluate. Can be a template expression with {{ }} syntax."
        },
        "test": {
          "type": "string",
          "description": "Legacy alternative to condition. The condition to evaluate using JavaScript syntax."
        },
        "then": {
          "type": "array",
          "description": "Steps to execute if the condition is true",
          "items": {
            "$ref": "#"
          }
        },
        "else": {
          "type": "array",
          "description": "Steps to execute if the condition is false",
          "items": {
            "$ref": "#"
          }
        }
      },
      "allOf": [
        {
          "anyOf": [
            {
              "required": [
                "condition"
              ]
            },
            {
              "required": [
                "test"
              ]
            }
          ]
        },
        {
          "not": {
            "required": [
              "condition",
              "test"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "END": {
      "type": "object",
      "description": "Ends the execution of a flow based on the given condition, with optional notification.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "END",
            "END_IF"
          ],
          "description": "Ends the execution of a flow based on the given condition, with optional notification. (Note: Aliases END_IF are deprecated, use 'END' instead)"
        },
        "condition": {
          "type": "string",
          "description": "The condition to evaluate. Flow will be terminated gracefully when condition evaluates to true."
        },
        "notification": {
          "type": "object",
          "description": "Optional notification to show when ending the flow."
        }
      },
      "additionalProperties": false
    },
    "PARALLEL": {
      "type": "object",
      "description": "Runs multiple step lists in parallel and merges their flow contexts upon completion.",
      "required": [
        "step",
        "steps"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "PARALLEL"
          ],
          "description": "Runs multiple step lists in parallel and merges their flow contexts upon completion."
        },
        "steps": {
          "type": "array",
          "description": "An array of step arrays, where each sub-array represents a branch of steps to execute in parallel.",
          "items": {
            "$ref": "#"
          }
        }
      },
      "additionalProperties": false
    },
    "SET": {
      "type": "object",
      "description": "Sets variables in the flow context. Can execute steps, evaluate expressions, or set key-value pairs.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SET"
          ],
          "description": "Sets variables in the flow context. Can execute steps, evaluate expressions, or set key-value pairs."
        },
        "steps": {
          "type": "array",
          "description": "Steps to execute before setting the variable. The result is stored in the flow context.",
          "items": {
            "$ref": "#"
          }
        },
        "expression": {
          "type": "string",
          "description": "JavaScript expression to evaluate and set as the variable value."
        },
        "text": {
          "type": "string",
          "description": "Text to resolve (with variables) and set as the variable value."
        },
        "raw_text": {
          "type": "string",
          "description": "Raw text to set as the variable value without resolution."
        },
        "name": {
          "type": "string",
          "description": "Name of the variable to set in the flow context."
        },
        "convertTo": {
          "type": "string",
          "description": "Convert the value to a specific type (e.g., \"number\").",
          "enum": [
            "number"
          ]
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "The resolved value stored in the flow context (when using legacy name/text syntax)."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": true
    },
    "SWITCH": {
      "type": "object",
      "description": "Evaluates a switch value and executes the steps for the matching case. Supports both array and object syntax for cases.",
      "required": [
        "step",
        "cases"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SWITCH"
          ],
          "description": "Evaluates a switch value and executes the steps for the matching case. Supports both array and object syntax for cases."
        },
        "condition": {
          "type": "string",
          "description": "The condition to evaluate for the switch value. Can be a template expression with {{ }} syntax."
        },
        "switch": {
          "type": "string",
          "description": "Legacy alternative to condition. The switch value to evaluate using JavaScript syntax."
        },
        "cases": {
          "type": "object",
          "description": "The cases to match against the switch value. Each case key should match a possible switch value, with the value being an array of steps to execute. Use \"default\" as a fallback case.",
          "additionalProperties": {
            "type": "array",
            "description": "Array of steps to execute when this case matches the switch value",
            "items": {
              "$ref": "#"
            }
          },
          "patternProperties": {
            "default": {
              "type": "array",
              "description": "Fallback steps to execute when no other case matches",
              "items": {
                "$ref": "#"
              }
            }
          }
        }
      },
      "additionalProperties": true
    },
    "CALL_TEMPLATE": {
      "type": "object",
      "description": "Calls a predefined template of steps by name and executes it.",
      "required": [
        "step",
        "name"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "CALL_TEMPLATE"
          ],
          "description": "Calls a predefined template of steps by name and executes it."
        },
        "name": {
          "type": "string",
          "description": "The name of the template to call from the TEMPLATES: section."
        }
      },
      "additionalProperties": false
    },
    "TRY": {
      "type": "object",
      "description": "Executes a set of steps in a try block, and if an error occurs, executes catch steps.",
      "required": [
        "step",
        "try"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "TRY"
          ],
          "description": "Executes a set of steps in a try block, and if an error occurs, executes catch steps."
        },
        "try": {
          "type": "array",
          "description": "Steps to execute in the try block.",
          "items": {
            "$ref": "#"
          }
        },
        "catch": {
          "type": "array",
          "description": "Steps to execute if an error occurs in the try block.",
          "items": {
            "$ref": "#"
          }
        }
      },
      "additionalProperties": false
    },
    "WHILE": {
      "type": "object",
      "description": "Repeatedly executes a set of steps while a condition evaluates to true.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "WHILE"
          ],
          "description": "Repeatedly executes a set of steps while a condition evaluates to true."
        },
        "condition": {
          "type": "string",
          "description": "The condition to evaluate for the loop. Can be a template expression with {{ }} syntax."
        },
        "test": {
          "type": "string",
          "description": "Legacy alternative to condition. The test expression to evaluate using JavaScript syntax."
        },
        "do": {
          "type": "array",
          "description": "Steps to execute in each iteration of the loop.",
          "items": {
            "$ref": "#"
          }
        }
      },
      "additionalProperties": false
    },
    "GET_TEXT_CONTENT": {
      "type": "object",
      "description": "Retrieves text content from the document or a specific location and sets it as output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "GET_TEXT_CONTENT",
            "CTX_GET_TEXT_CONTENT"
          ],
          "description": "Retrieves text content from the document or a specific location and sets it as output. (Note: Aliases CTX_GET_TEXT_CONTENT are deprecated, use 'GET_TEXT_CONTENT' instead)"
        },
        "at": {
          "type": "string",
          "description": "Location to read text from. Options: CURSOR, CURSOR_PARAGRAPH, XPATH, REPORT.",
          "enum": [
            "CURSOR",
            "CURSOR_PARAGRAPH",
            "XPATH",
            "REPORT"
          ]
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression for reading text at a specific location."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "The retrieved text content."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "GET_XML_CONTENT": {
      "type": "object",
      "description": "Retrieves XML content from the document or a specific location and sets it as output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "GET_XML_CONTENT",
            "CTX_GET_XML_CONTENT"
          ],
          "description": "Retrieves XML content from the document or a specific location and sets it as output. (Note: Aliases CTX_GET_XML_CONTENT are deprecated, use 'GET_XML_CONTENT' instead)"
        },
        "at": {
          "type": "string",
          "description": "Location to read XML from. Options: CURSOR, CURSOR_PARAGRAPH, XPATH, REPORT.",
          "enum": [
            "CURSOR",
            "CURSOR_PARAGRAPH",
            "XPATH",
            "REPORT"
          ]
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression for reading XML at a specific location."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "The retrieved XML content as a string."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "The retrieved XML content as an object (for REPORT)."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "SELECTED_OBJECT": {
      "type": "object",
      "description": "Retrieves information about the currently selected content object and sets it as output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SELECTED_OBJECT",
            "CTX_SELECTED_OBJECT"
          ],
          "description": "Retrieves information about the currently selected content object and sets it as output. (Note: Aliases CTX_SELECTED_OBJECT are deprecated, use 'SELECTED_OBJECT' instead)"
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "The ID of the selected content."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "The full information object of the selected content."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "SET_METADATA": {
      "type": "object",
      "description": "Set a field value in the object panel using a selector or XPath. Supports tagsinput and plain fields.",
      "required": [
        "step",
        "selector"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SET_METADATA",
            "CTX_SET_METADATA"
          ],
          "description": "Set a field value in the object panel using a selector or XPath. Supports tagsinput and plain fields. (Note: Aliases CTX_SET_METADATA are deprecated, use 'SET_METADATA' instead)"
        },
        "selector": {
          "type": "string",
          "description": "CSS selector of the input element to update."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Text value to set in the field (used for non-tagsinput fields and fallback)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "List of values to set (used for tagsinput fields)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "SHOW_RESPONSE": {
      "type": "object",
      "description": "Shows a response to the user in a modal. Can run inline steps before showing and supports HTML mode.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SHOW_RESPONSE",
            "CTX_SHOW_RESPONSE"
          ],
          "description": "Shows a response to the user in a modal. Can run inline steps before showing and supports HTML mode. (Note: Aliases CTX_SHOW_RESPONSE are deprecated, use 'SHOW_RESPONSE' instead)"
        },
        "inlineSteps": {
          "type": "array",
          "description": "Steps to execute while showing an in-place loading UI before the response is displayed.",
          "items": {
            "$ref": "#"
          }
        },
        "mode": {
          "type": "string",
          "description": "Render mode for the response. Use \"html\" to render HTML; otherwise plain text is used.",
          "enum": [
            "html",
            "text"
          ]
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text content to display in the modal"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "SHOW_NOTIFICATION": {
      "type": "object",
      "description": "Shows a notification to the user with optional title and resolved message.",
      "required": [
        "step",
        "message"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SHOW_NOTIFICATION",
            "CTX_SHOW_NOTIFICATION"
          ],
          "description": "Shows a notification to the user with optional title and resolved message. (Note: Aliases CTX_SHOW_NOTIFICATION are deprecated, use 'SHOW_NOTIFICATION' instead)"
        },
        "notificationType": {
          "type": "string",
          "description": "Type of notification (info, warning, error, message, task, success).",
          "enum": [
            "info",
            "warning",
            "error",
            "message",
            "task",
            "success"
          ],
          "default": "info"
        },
        "message": {
          "type": "string",
          "description": "The message to show. Supports variable resolution."
        },
        "title": {
          "type": "string",
          "description": "Optional title for the notification."
        }
      },
      "additionalProperties": false
    },
    "INSERT_TEXT": {
      "type": "object",
      "description": "Insert text at the given location (cursor, xpath or report). If the step reads text from the flowContext, you can use a preceding SET step to provide the content.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "INSERT_TEXT",
            "CTX_INSERT_TEXT"
          ],
          "description": "Insert text at the given location (cursor, xpath or report). If the step reads text from the flowContext, you can use a preceding SET step to provide the content. (Note: Aliases CTX_INSERT_TEXT are deprecated, use 'INSERT_TEXT' instead)"
        },
        "in": {
          "type": "string",
          "description": "Optional path or variable name to read the content from the flowContext. If omitted, the step uses the content from the default text input."
        },
        "at": {
          "type": "string",
          "description": "Where to insert the text. Use CURSOR (default), XPATH, or REPORT.",
          "enum": [
            "CURSOR",
            "XPATH",
            "REPORT"
          ]
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression used when `at` is XPATH."
        },
        "forceWrite": {
          "type": "boolean",
          "description": "Write content even if the document is considered read-only."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text content to insert. If omitted, reads from the `in` parameter or default text input."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "INSERT_XML": {
      "type": "object",
      "description": "Insert XML content at the given location. The content can be provided via the flowContext (using `in`) or via the default text input.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "INSERT_XML",
            "CTX_INSERT_XML"
          ],
          "description": "Insert XML content at the given location. The content can be provided via the flowContext (using `in`) or via the default text input. (Note: Aliases CTX_INSERT_XML are deprecated, use 'INSERT_XML' instead)"
        },
        "in": {
          "type": "string",
          "description": "Optional path or variable name to read the XML content from the flowContext. If omitted, the step uses the default text input."
        },
        "at": {
          "type": "string",
          "description": "Where to insert the XML. Use CURSOR (default) or XPATH.",
          "enum": [
            "CURSOR",
            "XPATH"
          ]
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression used when `at` is XPATH."
        },
        "position": {
          "type": "string",
          "description": "Position mode for insertion when using cursor (e.g. insertInline, insertBefore, insertAfter).",
          "enum": [
            "insertInline",
            "insertBefore",
            "insertAfter"
          ],
          "default": "insertInline"
        },
        "forceWrite": {
          "type": "boolean",
          "description": "Write content even if the document is considered read-only."
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "REPLACE_TEXT": {
      "type": "object",
      "description": "Replace existing text at the given location (cursor paragraph, cursor or xpath) with the provided content. Content can come from a flow variable (using `in`) or the default text input.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "REPLACE_TEXT",
            "CTX_REPLACE_TEXT"
          ],
          "description": "Replace existing text at the given location (cursor paragraph, cursor or xpath) with the provided content. Content can come from a flow variable (using `in`) or the default text input. (Note: Aliases CTX_REPLACE_TEXT are deprecated, use 'REPLACE_TEXT' instead)"
        },
        "in": {
          "type": "string",
          "description": "Optional text or variable name to read the content from the flowContext. If omitted, the step uses the default text input."
        },
        "at": {
          "type": "string",
          "description": "Where to replace the text. Options: XPATH, CURSOR_PARAGRAPH, CURSOR.",
          "enum": [
            "XPATH",
            "CURSOR_PARAGRAPH",
            "CURSOR"
          ]
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression used when `at` is XPATH."
        },
        "forceWrite": {
          "type": "boolean",
          "description": "Write content even if the document is considered read-only."
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "REPLACE_XML": {
      "type": "object",
      "description": "Replace XML content at the given location. Content can be sourced from a flow variable (`in`) or the default text input.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "REPLACE_XML",
            "CTX_REPLACE_XML"
          ],
          "description": "Replace XML content at the given location. Content can be sourced from a flow variable (`in`) or the default text input. (Note: Aliases CTX_REPLACE_XML are deprecated, use 'REPLACE_XML' instead)"
        },
        "in": {
          "type": "string",
          "description": "Optional text or variable name to read the XML content from the flowContext."
        },
        "at": {
          "type": "string",
          "description": "Where to replace the XML. Options: XPATH, CURSOR_PARAGRAPH, CURSOR.",
          "enum": [
            "XPATH",
            "CURSOR_PARAGRAPH",
            "CURSOR"
          ]
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression used when `at` is XPATH."
        },
        "forceWrite": {
          "type": "boolean",
          "description": "Write content even if the document is considered read-only."
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "INSERT_LIST": {
      "type": "object",
      "description": "Insert a list of items into the document. The items can be provided as a list input or be generated from a text input (converted to a list using ProcessToList).",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "INSERT_LIST",
            "CTX_INSERT_LIST"
          ],
          "description": "Insert a list of items into the document. The items can be provided as a list input or be generated from a text input (converted to a list using ProcessToList). (Note: Aliases CTX_INSERT_LIST are deprecated, use 'INSERT_LIST' instead)"
        },
        "in": {
          "type": "string",
          "description": "Optional text or variable name to read the content (text) from the flowContext. If omitted, the step uses the default text input."
        },
        "containerElement": {
          "type": "string",
          "description": "Container tag to use for the list (default 'ul')."
        },
        "listItemElement": {
          "type": "string",
          "description": "List item tag to use for entries (default 'li')."
        },
        "omitContainer": {
          "type": "boolean",
          "description": "When true, inserts items one-by-one instead of a full container."
        },
        "xpath": {
          "type": "string",
          "description": "XPath expression used when `at` is XPATH."
        },
        "at": {
          "type": "string",
          "description": "Where to insert the list. Use CURSOR (default) or XPATH.",
          "enum": [
            "CURSOR",
            "XPATH"
          ]
        },
        "position": {
          "type": "string",
          "description": "Position mode for insertion when using cursor (e.g. insertInline, insertBefore, insertAfter).",
          "enum": [
            "insertInline",
            "insertBefore",
            "insertAfter"
          ],
          "default": "insertInline"
        },
        "forceWrite": {
          "type": "boolean",
          "description": "Write content even if the document is considered read-only."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "List of items to insert. If omitted, the step converts the text input to a list."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Text content to convert to a list if no list input is provided."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "allOf": [
        {
          "if": {
            "properties": {
              "at": {
                "const": "XPATH"
              }
            }
          },
          "then": {
            "required": [
              "xpath"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "SANITIZE": {
      "type": "object",
      "description": "Sanitizes text input. Can strip markdown code blocks and validate/repair XML content before forwarding the result to the flow context.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SANITIZE"
          ],
          "description": "Sanitizes text input. Can strip markdown code blocks and validate/repair XML content before forwarding the result to the flow context."
        },
        "stripMarkdown": {
          "type": "boolean",
          "description": "If true, removes markdown code blocks (``` ... ```) from the input before further processing.",
          "default": false
        },
        "validateAndRepairXml": {
          "type": "boolean",
          "description": "If true, attempts to validate and repair XML fragments found in the input.",
          "default": false
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text input to sanitize"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Sanitized text output after processing."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "SCRIPTING": {
      "type": "object",
      "description": "Executes inline JavaScript (`script`) or a JS-style template (`template`). Use with caution — only in trusted flows.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SCRIPTING"
          ],
          "description": "Executes inline JavaScript (`script`) or a JS-style template (`template`). Use with caution — only in trusted flows."
        },
        "script": {
          "type": "string",
          "description": "A JavaScript expression or block to be executed in the step context. If `mode: \"async\"` the script is treated as async."
        },
        "template": {
          "type": "string",
          "description": "A template string that will be resolved using the step context and then parsed as JSON. Use this when you want to build structured output."
        },
        "mode": {
          "type": "string",
          "description": "Execution mode for scripts. Use \"async\" to run the script using the asynchronous evaluator.",
          "enum": [
            "async"
          ]
        }
      },
      "allOf": [
        {
          "anyOf": [
            {
              "required": [
                "script"
              ]
            },
            {
              "required": [
                "template"
              ]
            }
          ]
        },
        {
          "not": {
            "required": [
              "script",
              "template"
            ]
          }
        }
      ],
      "additionalProperties": false
    },
    "TO_LIST": {
      "type": "object",
      "description": "Convert a textual AI response into a list of strings. Detects JSON arrays, ordered/unordered lists, comma-separated values, or falls back to a single-item array.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "TO_LIST",
            "PROCESS_TO_LIST"
          ],
          "description": "Convert a textual AI response into a list of strings. Detects JSON arrays, ordered/unordered lists, comma-separated values, or falls back to a single-item array. (Note: Aliases PROCESS_TO_LIST are deprecated, use 'TO_LIST' instead)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text input to convert to a list (supports JSON arrays, markdown lists, CSV, etc.)"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "list",
                "description": "Array of extracted list items (strings)"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "PARSE_JSON": {
      "type": "object",
      "description": "Parses JSON content from the text input and stores the resulting object in the flow context.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "PARSE_JSON",
            "PROCESS_PARSE_JSON"
          ],
          "description": "Parses JSON content from the text input and stores the resulting object in the flow context. (Note: Aliases PROCESS_PARSE_JSON are deprecated, use 'PARSE_JSON' instead)"
        },
        "in": {
          "type": "string",
          "description": "Optional path or variable name to read the JSON string from the flowContext. If omitted, the default text input is used."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "JSON string to parse. If omitted, reads from the `in` parameter or default text input."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "The parsed JSON object stored in the flow context."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "The original JSON text string (passthrough)."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "MARKDOWN_TO_HTML": {
      "type": "object",
      "description": "Converts markdown text to HTML and sets it as text output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "MARKDOWN_TO_HTML"
          ],
          "description": "Converts markdown text to HTML and sets it as text output."
        },
        "text": {
          "type": "string",
          "description": "Text input or flow variable to convert. If omitted, the default text input is used."
        }
      },
      "additionalProperties": false
    },
    "BASE64_TO_BLOB": {
      "type": "object",
      "description": "Converts a base64-encoded string to a Blob and stores it in the flow context as a blob output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "BASE64_TO_BLOB"
          ],
          "description": "Converts a base64-encoded string to a Blob and stores it in the flow context as a blob output."
        },
        "in": {
          "type": "string",
          "description": "Optional path or variable name to read the base64 string from the flowContext. If omitted, the default text input is used."
        },
        "contentType": {
          "type": "string",
          "description": "Optional content type for the resulting Blob (e.g. \"image/png\")."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "The resulting Blob created from the base64 input."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "BRIGHTER_AI": {
      "type": "object",
      "description": "Upload an image to BrighterAI, poll for processing completion and download the anonymized image. The resulting anonymized image blob is written to the configured output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "BRIGHTER_AI",
            "SERVICE_BRIGHTER_AI"
          ],
          "description": "Upload an image to BrighterAI, poll for processing completion and download the anonymized image. The resulting anonymized image blob is written to the configured output. (Note: Aliases SERVICE_BRIGHTER_AI are deprecated, use 'BRIGHTER_AI' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'BRIGHTER_AI'."
        },
        "serviceName": {
          "type": "string",
          "description": "Service operation name (e.g. blur, dnat, mask). If not provided, blur is used."
        },
        "params": {
          "type": "object",
          "description": "Optional url parameters object passed to the BrighterAI upload endpoint."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Input image blob to be processed (can come from previous steps or a flow variable)."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageName",
                    "description": "Optional image name to use when uploading (defaults to \"image.jpg\")."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "The anonymized image blob returned by the BrighterAI service."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "DEEPL_TRANSLATE": {
      "type": "object",
      "description": "Translate text using DeepL. Supports many optional DeepL parameters and returns the translated text as the default text output.",
      "required": [
        "step",
        "instruction",
        "target_lang"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "DEEPL_TRANSLATE",
            "SERVICE_DEEPL_TRANSLATE"
          ],
          "description": "Translate text using DeepL. Supports many optional DeepL parameters and returns the translated text as the default text output. (Note: Aliases SERVICE_DEEPL_TRANSLATE are deprecated, use 'DEEPL_TRANSLATE' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'DEEPL'."
        },
        "instruction": {
          "type": "string",
          "description": "Text to translate (can include templates). If omitted, the default text input is used."
        },
        "target_lang": {
          "type": "string",
          "description": "Target language code for the translation (e.g. EN, DE, FR)."
        },
        "replaceXmlLang": {
          "type": "string",
          "description": "Optional: replace xml:lang attributes in the translated output with this language code."
        },
        "tag_handling": {
          "type": "string",
          "description": "How to handle tags; default is \"xml\".",
          "enum": [
            "xml",
            "html",
            "none"
          ]
        },
        "source_lang": {
          "type": "string",
          "description": "Language of the text to be translated. If this parameter is omitted, the API will attempt to detect the language of the text and translate it."
        },
        "context": {
          "type": "string",
          "description": "Additional context that can influence a translation but is not translated itself."
        },
        "split_sentences": {
          "type": "string",
          "description": "Sets whether the translation engine should first split the input into sentences.",
          "enum": [
            "0",
            "1",
            "nonewlines"
          ]
        },
        "preserve_formatting": {
          "type": "boolean",
          "description": "Sets whether the translation engine should respect the original formatting, even if it would usually"
        },
        "formality": {
          "type": "string",
          "description": "Sets whether the translated text should lean towards formal or informal language.",
          "enum": [
            "default",
            "more",
            "less",
            "prefer_more",
            "prefer_less"
          ]
        },
        "glossary_id": {
          "type": "string",
          "description": "Specify the glossary to use for the translation."
        },
        "outline_detection": {
          "type": "boolean",
          "description": "Disable the automatic detection of XML"
        },
        "non_splitting_tags": {
          "type": "string",
          "description": "Comma-separated list of XML tags which never split sentences."
        },
        "splitting_tags": {
          "type": "string",
          "description": "Comma-separated list of XML tags which always cause splits."
        },
        "ignore_tags": {
          "type": "string",
          "description": "Comma-separated list of XML tags that indicate text not to be translated."
        },
        "model_type": {
          "type": "string",
          "description": "Specifies which DeepL model should be used for translation.",
          "enum": [
            "quality_optimized",
            "prefer_quality_optimized",
            "latency_optimized"
          ]
        },
        "show_billed_characters": {
          "type": "boolean",
          "description": "When true, the response will include the billed_characters parameter."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Translated text"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "DEEPL_WRITE": {
      "type": "object",
      "description": "Rephrase/write text using DeepL Write endpoint. Returns the rephrased text in the default text output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "DEEPL_WRITE",
            "SERVICE_DEEPL_WRITE"
          ],
          "description": "Rephrase/write text using DeepL Write endpoint. Returns the rephrased text in the default text output. (Note: Aliases SERVICE_DEEPL_WRITE are deprecated, use 'DEEPL_WRITE' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'DEEPL_WRITE'."
        },
        "instruction": {
          "type": "string",
          "description": "Text to be rephrased or written. If omitted, default text input is used."
        },
        "target_lang": {
          "type": "string",
          "description": "Optional target language for writing/rephrasing."
        },
        "writing_style": {
          "type": "string",
          "description": "Optional writing style parameter."
        },
        "tone": {
          "type": "string",
          "description": "Optional tone parameter for the writing API."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Rewritten text"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "EDAPI_OBJECT_CONTENT": {
      "type": "object",
      "description": "Fetch object content from EDAPI and return it as blob or configured outputs. The content id can be provided via cfg.contentId or via a text input.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "EDAPI_OBJECT_CONTENT"
          ],
          "description": "Fetch object content from EDAPI and return it as blob or configured outputs. The content id can be provided via cfg.contentId or via a text input."
        },
        "contentId": {
          "type": "string",
          "description": "Explicit content id to fetch. If omitted, the step will try to read the id from the default text input."
        },
        "format": {
          "type": "string",
          "description": "Format of the content to fetch.",
          "default": "lowres"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Alternate input path to read the content id from the flow context (used if contentId is not provided)."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "The fetched content blob (default output)."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "UPLOAD_IMAGE": {
      "type": "object",
      "description": "Upload an image to EDAPI by providing an image object (url or blob). Returns the created object metadata as output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "UPLOAD_IMAGE",
            "CTX_UPLOAD_IMAGE"
          ],
          "description": "Upload an image to EDAPI by providing an image object (url or blob). Returns the created object metadata as output. (Note: Aliases CTX_UPLOAD_IMAGE are deprecated, use 'UPLOAD_IMAGE' instead)"
        },
        "objectType": {
          "type": "string",
          "description": "Type of object to create (default: Image)."
        },
        "createMode": {
          "type": "string",
          "description": "Creation mode, e.g. AUTO_RENAME."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "image",
                "description": "Image input object or blob to upload (can come from file picker or previous step)."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "The created object metadata returned by EDAPI."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "UPDATE_BINARY_CONTENT": {
      "type": "object",
      "description": "Update binary content of an existing object in EDAPI using the provided blob. Returns updated object metadata.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "UPDATE_BINARY_CONTENT",
            "CTX_UPDATE_BINARY_CONTENT"
          ],
          "description": "Update binary content of an existing object in EDAPI using the provided blob. Returns updated object metadata. (Note: Aliases CTX_UPDATE_BINARY_CONTENT are deprecated, use 'UPDATE_BINARY_CONTENT' instead)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Binary blob to upload for the object"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Id of the object to update"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "Updated object metadata"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "UPLOAD": {
      "type": "object",
      "description": "Upload content to EDAPI by providing a blob or specifying parameters (filename, type). Returns created object metadata.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "UPLOAD"
          ],
          "description": "Upload content to EDAPI by providing a blob or specifying parameters (filename, type). Returns created object metadata."
        },
        "options": {
          "type": "object",
          "description": "Obtions data for the UPLOAD"
        },
        "filename": {
          "type": "string",
          "description": "Filename to use for the uploaded object"
        },
        "basetype": {
          "type": "string",
          "description": "BaseType - Used to identify upload location when using basefolder configuration."
        },
        "type": {
          "type": "string",
          "description": "Object type to create (e.g. File, Image)"
        },
        "createMode": {
          "type": "string",
          "description": "Creation mode (e.g. AUTO_RENAME)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Binary blob to upload (preferred)"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "The created object metadata"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "ELEVENLABS_TTS": {
      "type": "object",
      "description": "Generate speech audio from text using ElevenLabs TTS. Text is taken from the default text input or a flow variable via `in`.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "ELEVENLABS_TTS"
          ],
          "description": "Generate speech audio from text using ElevenLabs TTS. Text is taken from the default text input or a flow variable via `in`."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'ELEVENLABS'."
        },
        "apiKey": {
          "type": "string",
          "description": "Override API key from service configuration"
        },
        "endpoint": {
          "type": "string",
          "description": "Override endpoint URL from service configuration"
        },
        "voice_id": {
          "type": "string",
          "description": "Voice identifier to use for synthesis"
        },
        "model_id": {
          "type": "string",
          "description": "TTS model id"
        },
        "voice_settings": {
          "type": "object",
          "description": "Optional voice settings object"
        },
        "output_format": {
          "type": "string",
          "description": "Desired audio output format (e.g. mp3, wav, pcm)"
        },
        "language_code": {
          "type": "string",
          "description": "Language code for the synthesis"
        },
        "pronunciation_dictionary_locators": {
          "type": "string",
          "description": "Pronunciation dictionary locators (array or string) - resolved"
        },
        "seed": {
          "type": "string",
          "description": "Seed for deterministic generation"
        },
        "previous_text": {
          "type": "string",
          "description": "Optional previous_text context"
        },
        "next_text": {
          "type": "string",
          "description": "Optional next_text context"
        },
        "apply_text_normalization": {
          "type": "boolean",
          "description": "Apply normalization to text"
        },
        "apply_language_text_normalization": {
          "type": "boolean",
          "description": "Apply language specific normalization"
        },
        "mime_type": {
          "type": "string",
          "description": "MIME type to use for the produced blob (e.g. audio/mpeg)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text to synthesize. If omitted, the default text input is used."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Generated audio blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "ELEVENLABS_STT": {
      "type": "object",
      "description": "Transcribe audio using ElevenLabs STT. Provides the transcribed text and optional JSON result.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "ELEVENLABS_STT"
          ],
          "description": "Transcribe audio using ElevenLabs STT. Provides the transcribed text and optional JSON result."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'ELEVENLABS'."
        },
        "apiKey": {
          "type": "string",
          "description": "Override API key from service configuration"
        },
        "endpoint": {
          "type": "string",
          "description": "Override endpoint URL from service configuration"
        },
        "model_id": {
          "type": "string",
          "description": "Model id to use for transcription - default: scribe_v1"
        },
        "language_code": {
          "type": "string",
          "description": "Language code for transcription"
        },
        "tag_audio_events": {
          "type": "boolean",
          "description": "Tag audio events option"
        },
        "num_speakers": {
          "type": "number",
          "description": "Number of speakers to detect"
        },
        "timestamps_granularity": {
          "type": "string",
          "description": "Timestamps granularity"
        },
        "diarize": {
          "type": "boolean",
          "description": "Enable diarization"
        },
        "additional_formats": {
          "type": "array",
          "description": "Additional output formats"
        },
        "file_format": {
          "type": "string",
          "description": "Input file format (e.g. wav, mp3)"
        },
        "cloud_storage_url": {
          "type": "string",
          "description": "URL to fetch input from cloud"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "file",
                "description": "Audio file or blob to transcribe"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Transcribed text"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "json",
                    "description": "Full transcription result (optional)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_YOUTUBE_AUTH_INIT": {
      "type": "object",
      "description": "Initiate OAuth authorization flow for YouTube (requires ProActions-Hub). Opens the auth URL when available unless cfg.omitOpenAuth is true.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_YOUTUBE_AUTH_INIT"
          ],
          "description": "Initiate OAuth authorization flow for YouTube (requires ProActions-Hub). Opens the auth URL when available unless cfg.omitOpenAuth is true."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "omitOpenAuth": {
          "type": "boolean",
          "description": "If true, do not automatically open the authorization URL in a new window; return the URL in the response instead."
        },
        "account": {
          "type": "string",
          "description": "Optional account identifier used by the Hub API."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "Result object returned by the auth/init call (contains authUrl etc.)"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_YOUTUBE_AUTH_STATUS": {
      "type": "object",
      "description": "Check the status of a previously initiated YouTube OAuth authorization flow.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_YOUTUBE_AUTH_STATUS"
          ],
          "description": "Check the status of a previously initiated YouTube OAuth authorization flow."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "account": {
          "type": "string",
          "description": "Optional account identifier used by the Hub API."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "Status object returned by the auth/status endpoint"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_YOUTUBE_AUTH_LOGOUT": {
      "type": "object",
      "description": "Log out the configured Hub YouTube account. Uses POST to logout and returns the resulting object.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_YOUTUBE_AUTH_LOGOUT"
          ],
          "description": "Log out the configured Hub YouTube account. Uses POST to logout and returns the resulting object."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "account": {
          "type": "string",
          "description": "Account identifier to log out (resolved)."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "Result from the logout endpoint"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_YOUTUBE_UPLOAD": {
      "type": "object",
      "description": "Upload a video to YouTube via the Hub service. Supports video file, optional thumbnail, metadata fields (title, description, privacyStatus, categoryId), tags and additionalMetadata. Progress can be shown when cfg.updateProgress is true.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_YOUTUBE_UPLOAD"
          ],
          "description": "Upload a video to YouTube via the Hub service. Supports video file, optional thumbnail, metadata fields (title, description, privacyStatus, categoryId), tags and additionalMetadata. Progress can be shown when cfg.updateProgress is true."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "title": {
          "type": "string",
          "description": "Video title"
        },
        "description": {
          "type": "string",
          "description": "Video description"
        },
        "privacyStatus": {
          "type": "string",
          "description": "Privacy status: public, unlisted, or private",
          "enum": [
            "public",
            "unlisted",
            "private"
          ]
        },
        "categoryId": {
          "type": "string",
          "description": "YouTube category id"
        },
        "tags": {
          "type": "array",
          "description": "Array of tags to set on the uploaded video"
        },
        "additionalMetadata": {
          "type": "object",
          "description": "Optional object with additional metadata to attach"
        },
        "updateProgress": {
          "type": "boolean",
          "description": "If true, progress will be reported to ProgressBar during upload"
        },
        "account": {
          "type": "string",
          "description": "Account identifier for the Hub target (resolved)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "video",
                    "description": "Primary video file or blob to upload"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "thumbnail",
                    "description": "Optional thumbnail file/blob"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "Result object returned by the upload endpoint (video metadata)"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_CONTENT_EXTRACTION": {
      "type": "object",
      "description": "Extract textual content from a URL using the ProActions-Hub extraction tool. The URL can be provided via the default text input or the `in`/`url` configuration.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_CONTENT_EXTRACTION"
          ],
          "description": "Extract textual content from a URL using the ProActions-Hub extraction tool. The URL can be provided via the default text input or the `in`/`url` configuration."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "URL to extract content from. If omitted, the step will try to read the URL from the default text input."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "object",
                "description": "The parsed extraction result object returned by the Hub extraction API."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_ASSISTANT": {
      "type": "object",
      "description": "Create or reuse an OpenAI assistant. If cfg.assistantId is provided the assistant will be retrieved; otherwise a new assistant is created. The created or retrieved assistant is written to the flow context.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_ASSISTANT"
          ],
          "description": "Create or reuse an OpenAI assistant. If cfg.assistantId is provided the assistant will be retrieved; otherwise a new assistant is created. The created or retrieved assistant is written to the flow context."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_COMPLETION'."
        },
        "assistantId": {
          "type": "string",
          "description": "If provided, load the assistant with this ID instead of creating a new one."
        },
        "reuse": {
          "type": "boolean",
          "description": "If true and storeIn is provided, try to reuse a persisted assistant id."
        },
        "storeIn": {
          "type": "string",
          "description": "Storage location for persisting assistant id: page/session/browser.",
          "enum": [
            "page",
            "session",
            "browser"
          ]
        },
        "instruction": {
          "type": "string",
          "description": "Initial instructions / system prompt for the assistant (supports templates)."
        },
        "model": {
          "type": "string",
          "description": "Model to use when creating the assistant"
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "assistant",
                "description": "Created/retrieved assistant object"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_THREAD": {
      "type": "object",
      "description": "Create or reuse an OpenAI thread. If cfg.reuse and cfg.storeIn are provided the thread id may be reused. The created thread is written to the flow context.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_THREAD"
          ],
          "description": "Create or reuse an OpenAI thread. If cfg.reuse and cfg.storeIn are provided the thread id may be reused. The created thread is written to the flow context."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_COMPLETION'."
        },
        "reuse": {
          "type": "boolean",
          "description": "If true and storeIn is provided, try to reuse a persisted thread id."
        },
        "storeIn": {
          "type": "string",
          "description": "Storage location for persisting thread id: page/session/browser.",
          "enum": [
            "page",
            "session",
            "browser"
          ]
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "thread",
                "description": "Created thread object"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_DELETE_THREAD": {
      "type": "object",
      "description": "Delete a thread using its id. Provide the thread object or id via inputs.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_DELETE_THREAD"
          ],
          "description": "Delete a thread using its id. Provide the thread object or id via inputs."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_COMPLETION'."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "thread",
                "description": "Thread object or id to delete"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_THREAD_FILES": {
      "type": "object",
      "description": "Create a vector store for thread file tools and optionally upload files. Writes created store to flow context and optional uploaded file list to an optional output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_THREAD_FILES"
          ],
          "description": "Create a vector store for thread file tools and optionally upload files. Writes created store to flow context and optional uploaded file list to an optional output."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_COMPLETION'."
        },
        "name": {
          "type": "string",
          "description": "Name of the vector store"
        },
        "replaceStores": {
          "type": "boolean",
          "description": "If true replace existing stores instead of appending"
        },
        "expires_after": {
          "type": "number",
          "description": "Expiration configuration for the vector store"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "files",
                    "description": "Optional files to upload to the vector store"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "thread",
                    "description": "Optional thread object to update with tool resources"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "store",
                    "description": "Created vector store object"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "files",
                    "description": "Optional uploaded files metadata list"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_THREAD_MESSAGE": {
      "type": "object",
      "description": "Send a message to a thread/assistant. The assistant and thread inputs are required. The message can be provided via cfg.instruction or via default text input. The assistant response text is stored in the default text output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_THREAD_MESSAGE"
          ],
          "description": "Send a message to a thread/assistant. The assistant and thread inputs are required. The message can be provided via cfg.instruction or via default text input. The assistant response text is stored in the default text output."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_COMPLETION'."
        },
        "instruction": {
          "type": "string",
          "description": "Optional message instruction; if omitted the default text input is used."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "assistant",
                    "description": "Assistant object or id used to send the message"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "thread",
                    "description": "Thread object or id to send the message to"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Message text to send to the assistant. If omitted, reads from cfg.instruction or default text input."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Assistant response text (default text output)"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_COMPLETION": {
      "type": "object",
      "description": "Chat/completion step that calls OpenAI (or Hub/Azure variants). Supports messages, images, audio, attachments, tools (functions), structured outputs (json_schema/json_object/list), reasoning and audio output. Many step-level configuration options are supported via cfg.*.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_COMPLETION",
            "SERVICE_OPENAI_COMPLETION"
          ],
          "description": "Chat/completion step that calls OpenAI (or Hub/Azure variants). Supports messages, images, audio, attachments, tools (functions), structured outputs (json_schema/json_object/list), reasoning and audio output. Many step-level configuration options are supported via cfg.*. (Note: The alias 'SERVICE_OPENAI_COMPLETION' is deprecated; use 'OPENAI_COMPLETION' instead.)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_COMPLETION'."
        },
        "model": {
          "type": "string",
          "description": "Model id or deployment id to use"
        },
        "promptId": {
          "type": "string",
          "description": "Id of a stored prompt configuration to reuse"
        },
        "instruction": {
          "type": "string",
          "description": "The user prompt to send to the LLM (supports templates)"
        },
        "behavior": {
          "type": "string",
          "description": "The system prompt to send to the LLM (supports templates)"
        },
        "options": {
          "type": "object",
          "description": "Raw options that will be merged into the request payload"
        },
        "response_format": {
          "description": "Structured response format. Can be a string (\"json_object\" or \"list\") or an object with json_schema definition",
          "oneOf": [
            {
              "type": "string",
              "description": "Shorthand for common structured output formats",
              "enum": [
                "json_object",
                "list"
              ]
            },
            {
              "type": "object",
              "description": "Full JSON schema definition for structured outputs",
              "properties": {
                "name": {
                  "type": "string",
                  "description": "Schema name"
                },
                "schema": {
                  "type": "object",
                  "description": "JSON schema definition"
                },
                "strict": {
                  "type": "boolean",
                  "description": "Enable strict mode for schema adherence"
                }
              }
            }
          ]
        },
        "outputAudio": {
          "type": "object",
          "description": "Configuration object to request audio output (voice/format)"
        },
        "tool_choice": {
          "type": "string",
          "description": "Optional tool selection policy"
        },
        "maxToolIterations": {
          "type": "number",
          "description": "Maximum number of tool iterations to perform"
        },
        "afterToolResultToolChoice": {
          "type": "string",
          "description": "Behavior after a tool result (none|auto)",
          "enum": [
            "none",
            "auto"
          ]
        },
        "functions": {
          "type": "array",
          "description": "Array of function descriptors for tool calling"
        },
        "functionsReuseContext": {
          "type": "boolean",
          "description": "Whether to reuse full flowContext when executing function templates"
        },
        "safetyIdentifier": {
          "type": "string",
          "description": "Optional safety identifier for the request"
        },
        "messages": {
          "type": "array",
          "description": "Array of prior messages to include in the conversation (overrides flowContext.messages)"
        },
        "images": {
          "type": "array",
          "description": "Array or single image input(s) defined inline via cfg.images or cfg.image"
        },
        "image": {
          "type": "string",
          "description": "Single inline image config"
        },
        "audio": {
          "type": "string",
          "description": "Single inline audio config"
        },
        "audios": {
          "type": "array",
          "description": "Array of audio inputs"
        },
        "reasoning": {
          "type": "object",
          "description": "Reasoning configuration object to pass to the API (for models supporting reasoning)"
        },
        "imageDetail": {
          "type": "string",
          "description": "Image detail level for image inputs (low, high, auto)",
          "enum": [
            "low",
            "high",
            "auto"
          ],
          "default": "auto"
        },
        "audioFormat": {
          "type": "string",
          "description": "Audio format for audio inputs (e.g., wav, mp3)"
        },
        "attachments": {
          "type": "array",
          "description": "Array of generic attachments (images, audio, text) to include in the user message"
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Default textual response (setTextOutput) from the completion"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Structured object output when using json/object response formats"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "List output extracted from structured response (response_format=list)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "audio",
                    "description": "Audio data URL or blob when requesting audio output"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "audio_transcript",
                    "description": "Optional transcript produced as part of audio response"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "response",
                    "description": "Raw API response object saved as optional output"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "reasoning",
                    "description": "Optional reasoning object provided by the model/SDK"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "choices",
                    "description": "Optional choices array (raw) from the completion"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "usage",
                    "description": "Optional usage object from the completion result"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "tool_results",
                    "description": "Optional array of tool results produced during execution"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_COMPLETION": {
      "type": "object",
      "description": "Chat/completion step that calls OpenAI (or Hub/Azure variants). Supports messages, images, audio, attachments, tools (functions), structured outputs (json_schema/json_object/list), reasoning and audio output. Many step-level configuration options are supported via cfg.*.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_COMPLETION"
          ],
          "description": "Chat/completion step that calls OpenAI (or Hub/Azure variants). Supports messages, images, audio, attachments, tools (functions), structured outputs (json_schema/json_object/list), reasoning and audio output. Many step-level configuration options are supported via cfg.*."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "model": {
          "type": "string",
          "description": "Model id or deployment id to use"
        },
        "promptId": {
          "type": "string",
          "description": "Id of a stored prompt configuration to reuse"
        },
        "instruction": {
          "type": "string",
          "description": "The user prompt to send to the LLM (supports templates)"
        },
        "behavior": {
          "type": "string",
          "description": "The system prompt to send to the LLM (supports templates)"
        },
        "options": {
          "type": "object",
          "description": "Raw options that will be merged into the request payload"
        },
        "response_format": {
          "description": "Structured response format. Can be a string (\"json_object\" or \"list\") or an object with json_schema definition",
          "oneOf": [
            {
              "type": "string",
              "description": "Shorthand for common structured output formats",
              "enum": [
                "json_object",
                "list"
              ]
            },
            {
              "type": "object",
              "description": "Full JSON schema definition for structured outputs",
              "properties": {
                "name": {
                  "type": "string",
                  "description": "Schema name"
                },
                "schema": {
                  "type": "object",
                  "description": "JSON schema definition"
                },
                "strict": {
                  "type": "boolean",
                  "description": "Enable strict mode for schema adherence"
                }
              }
            }
          ]
        },
        "outputAudio": {
          "type": "object",
          "description": "Configuration object to request audio output (voice/format)"
        },
        "tool_choice": {
          "type": "string",
          "description": "Optional tool selection policy"
        },
        "maxToolIterations": {
          "type": "number",
          "description": "Maximum number of tool iterations to perform"
        },
        "afterToolResultToolChoice": {
          "type": "string",
          "description": "Behavior after a tool result (none|auto)",
          "enum": [
            "none",
            "auto"
          ]
        },
        "functions": {
          "type": "array",
          "description": "Array of function descriptors for tool calling"
        },
        "functionsReuseContext": {
          "type": "boolean",
          "description": "Whether to reuse full flowContext when executing function templates"
        },
        "safetyIdentifier": {
          "type": "string",
          "description": "Optional safety identifier for the request"
        },
        "messages": {
          "type": "array",
          "description": "Array of prior messages to include in the conversation (overrides flowContext.messages)"
        },
        "images": {
          "type": "array",
          "description": "Array or single image input(s) defined inline via cfg.images or cfg.image"
        },
        "image": {
          "type": "string",
          "description": "Single inline image config"
        },
        "audio": {
          "type": "string",
          "description": "Single inline audio config"
        },
        "audios": {
          "type": "array",
          "description": "Array of audio inputs"
        },
        "reasoning": {
          "type": "object",
          "description": "Reasoning configuration object to pass to the API (for models supporting reasoning)"
        },
        "imageDetail": {
          "type": "string",
          "description": "Image detail level for image inputs (low, high, auto)",
          "enum": [
            "low",
            "high",
            "auto"
          ],
          "default": "auto"
        },
        "audioFormat": {
          "type": "string",
          "description": "Audio format for audio inputs (e.g., wav, mp3)"
        },
        "attachments": {
          "type": "array",
          "description": "Array of generic attachments (images, audio, text) to include in the user message"
        },
        "target": {
          "type": "string",
          "description": "Override the target used on service configuration level."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Default textual response (setTextOutput) from the completion"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Structured object output when using json/object response formats"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "List output extracted from structured response (response_format=list)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "audio",
                    "description": "Audio data URL or blob when requesting audio output"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "audio_transcript",
                    "description": "Optional transcript produced as part of audio response"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "response",
                    "description": "Raw API response object saved as optional output"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "reasoning",
                    "description": "Optional reasoning object provided by the model/SDK"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "choices",
                    "description": "Optional choices array (raw) from the completion"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "usage",
                    "description": "Optional usage object from the completion result"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "tool_results",
                    "description": "Optional array of tool results produced during execution"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "AZURE_OPENAI_COMPLETION": {
      "type": "object",
      "description": "Chat/completion step that calls OpenAI (or Hub/Azure variants). Supports messages, images, audio, attachments, tools (functions), structured outputs (json_schema/json_object/list), reasoning and audio output. Many step-level configuration options are supported via cfg.*.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "AZURE_OPENAI_COMPLETION",
            "SERVICE_AZURE_OPENAI_COMPLETION"
          ],
          "description": "Chat/completion step that calls OpenAI (or Hub/Azure variants). Supports messages, images, audio, attachments, tools (functions), structured outputs (json_schema/json_object/list), reasoning and audio output. Many step-level configuration options are supported via cfg.*. (Note: The alias 'SERVICE_AZURE_OPENAI_COMPLETION' is deprecated; use 'AZURE_OPENAI_COMPLETION' instead.)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'AZURE_OPENAI_COMPLETION'."
        },
        "model": {
          "type": "string",
          "description": "Model id or deployment id to use"
        },
        "promptId": {
          "type": "string",
          "description": "Id of a stored prompt configuration to reuse"
        },
        "instruction": {
          "type": "string",
          "description": "The user prompt to send to the LLM (supports templates)"
        },
        "behavior": {
          "type": "string",
          "description": "The system prompt to send to the LLM (supports templates)"
        },
        "options": {
          "type": "object",
          "description": "Raw options that will be merged into the request payload"
        },
        "response_format": {
          "description": "Structured response format. Can be a string (\"json_object\" or \"list\") or an object with json_schema definition",
          "oneOf": [
            {
              "type": "string",
              "description": "Shorthand for common structured output formats",
              "enum": [
                "json_object",
                "list"
              ]
            },
            {
              "type": "object",
              "description": "Full JSON schema definition for structured outputs",
              "properties": {
                "name": {
                  "type": "string",
                  "description": "Schema name"
                },
                "schema": {
                  "type": "object",
                  "description": "JSON schema definition"
                },
                "strict": {
                  "type": "boolean",
                  "description": "Enable strict mode for schema adherence"
                }
              }
            }
          ]
        },
        "outputAudio": {
          "type": "object",
          "description": "Configuration object to request audio output (voice/format)"
        },
        "tool_choice": {
          "type": "string",
          "description": "Optional tool selection policy"
        },
        "maxToolIterations": {
          "type": "number",
          "description": "Maximum number of tool iterations to perform"
        },
        "afterToolResultToolChoice": {
          "type": "string",
          "description": "Behavior after a tool result (none|auto)",
          "enum": [
            "none",
            "auto"
          ]
        },
        "functions": {
          "type": "array",
          "description": "Array of function descriptors for tool calling"
        },
        "functionsReuseContext": {
          "type": "boolean",
          "description": "Whether to reuse full flowContext when executing function templates"
        },
        "safetyIdentifier": {
          "type": "string",
          "description": "Optional safety identifier for the request"
        },
        "messages": {
          "type": "array",
          "description": "Array of prior messages to include in the conversation (overrides flowContext.messages)"
        },
        "images": {
          "type": "array",
          "description": "Array or single image input(s) defined inline via cfg.images or cfg.image"
        },
        "image": {
          "type": "string",
          "description": "Single inline image config"
        },
        "audio": {
          "type": "string",
          "description": "Single inline audio config"
        },
        "audios": {
          "type": "array",
          "description": "Array of audio inputs"
        },
        "reasoning": {
          "type": "object",
          "description": "Reasoning configuration object to pass to the API (for models supporting reasoning)"
        },
        "imageDetail": {
          "type": "string",
          "description": "Image detail level for image inputs (low, high, auto)",
          "enum": [
            "low",
            "high",
            "auto"
          ],
          "default": "auto"
        },
        "audioFormat": {
          "type": "string",
          "description": "Audio format for audio inputs (e.g., wav, mp3)"
        },
        "attachments": {
          "type": "array",
          "description": "Array of generic attachments (images, audio, text) to include in the user message"
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Default textual response (setTextOutput) from the completion"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Structured object output when using json/object response formats"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "List output extracted from structured response (response_format=list)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "audio",
                    "description": "Audio data URL or blob when requesting audio output"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "audio_transcript",
                    "description": "Optional transcript produced as part of audio response"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "response",
                    "description": "Raw API response object saved as optional output"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "reasoning",
                    "description": "Optional reasoning object provided by the model/SDK"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "choices",
                    "description": "Optional choices array (raw) from the completion"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "usage",
                    "description": "Optional usage object from the completion result"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "tool_results",
                    "description": "Optional array of tool results produced during execution"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_IMAGE_GENERATION": {
      "type": "object",
      "description": "Generate images using OpenAI image models (DALL·E and GPT-Image-1). Supports model, size, quality, count (n), response_format and other parameters. The generated images are stored in the configured output key or default `imageList`.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_IMAGE_GENERATION",
            "SERVICE_OPENAI_IMAGE_GENERATION"
          ],
          "description": "Generate images using OpenAI image models (DALL·E and GPT-Image-1). Supports model, size, quality, count (n), response_format and other parameters. The generated images are stored in the configured output key or default `imageList`. (Note: The alias 'SERVICE_OPENAI_IMAGE_GENERATION' is deprecated; use 'OPENAI_IMAGE_GENERATION' instead.)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_IMAGE_GENERATION'."
        },
        "model": {
          "type": "string",
          "description": "Image model to use (gpt-image-1, dall-e-3, dall-e-2)",
          "enum": [
            "gpt-image-1",
            "dall-e-3",
            "dall-e-2"
          ]
        },
        "instruction": {
          "type": "string",
          "description": "The user prompt to send to the LLM (supports templates)"
        },
        "size": {
          "type": "string",
          "description": "Desired image size (model-dependent)"
        },
        "quality": {
          "type": "string",
          "description": "Image quality setting"
        },
        "n": {
          "type": "number",
          "description": "Number of images to generate (1-4; DALL·E 3 supports only 1)"
        },
        "style": {
          "type": "string",
          "description": "Image style for supported models (e.g. natural, vivid)"
        },
        "response_format": {
          "type": "string",
          "description": "DALL·E response format: 'url' or 'b64_json' (gpt-image-1 returns b64_json)",
          "enum": [
            "url",
            "b64_json"
          ]
        },
        "output_format": {
          "type": "string",
          "description": "Output format for gpt-image-1 (png|jpeg|webp)",
          "enum": [
            "png",
            "jpeg",
            "webp"
          ]
        },
        "background": {
          "type": "string",
          "description": "Background setting (e.g. transparent)"
        },
        "seed": {
          "type": "number",
          "description": "Random seed for reproducible results"
        },
        "user": {
          "type": "string",
          "description": "User identifier for attribution and policy monitoring"
        },
        "timeoutMs": {
          "type": "number",
          "description": "Request timeout in milliseconds"
        },
        "maxRetries": {
          "type": "number",
          "description": "Maximum number of retries for recoverable errors"
        },
        "outputKey": {
          "type": "string",
          "description": "FlowContext key where images will be stored (default: imageList)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Prompt text input. If the step reads text input, use a preceding SET step to provide it (e.g. - step: SET text: \"...\" )."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageList",
                    "description": "Array of generated image items (each item contains url or b64_json) stored under the outputKey or default key"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Optional metadata about the generation (model, size, attemptCount)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_IMAGE_GENERATION": {
      "type": "object",
      "description": "ImageGeneration using ProActions Hub.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_IMAGE_GENERATION"
          ],
          "description": "ImageGeneration using ProActions Hub."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "model": {
          "type": "string",
          "description": "Image model to use (gpt-image-1, dall-e-3, dall-e-2)",
          "enum": [
            "gpt-image-1",
            "dall-e-3",
            "dall-e-2"
          ]
        },
        "instruction": {
          "type": "string",
          "description": "The user prompt to send to the LLM (supports templates)"
        },
        "size": {
          "type": "string",
          "description": "Desired image size (model-dependent)"
        },
        "quality": {
          "type": "string",
          "description": "Image quality setting"
        },
        "n": {
          "type": "number",
          "description": "Number of images to generate (1-4; DALL·E 3 supports only 1)"
        },
        "style": {
          "type": "string",
          "description": "Image style for supported models (e.g. natural, vivid)"
        },
        "response_format": {
          "type": "string",
          "description": "DALL·E response format: 'url' or 'b64_json' (gpt-image-1 returns b64_json)",
          "enum": [
            "url",
            "b64_json"
          ]
        },
        "output_format": {
          "type": "string",
          "description": "Output format for gpt-image-1 (png|jpeg|webp)",
          "enum": [
            "png",
            "jpeg",
            "webp"
          ]
        },
        "background": {
          "type": "string",
          "description": "Background setting (e.g. transparent)"
        },
        "seed": {
          "type": "number",
          "description": "Random seed for reproducible results"
        },
        "user": {
          "type": "string",
          "description": "User identifier for attribution and policy monitoring"
        },
        "timeoutMs": {
          "type": "number",
          "description": "Request timeout in milliseconds"
        },
        "maxRetries": {
          "type": "number",
          "description": "Maximum number of retries for recoverable errors"
        },
        "outputKey": {
          "type": "string",
          "description": "FlowContext key where images will be stored (default: imageList)"
        },
        "target": {
          "type": "string",
          "description": "Override the target used on service configuration level."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Prompt text input. If the step reads text input, use a preceding SET step to provide it (e.g. - step: SET text: \"...\" )."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageList",
                    "description": "Array of generated image items (each item contains url or b64_json) stored under the outputKey or default key"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Optional metadata about the generation (model, size, attemptCount)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "AZURE_OPENAI_IMAGE_GENERATION": {
      "type": "object",
      "description": "ImageGeneration using Azure OpenAI.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "AZURE_OPENAI_IMAGE_GENERATION",
            "SERVICE_AZURE_OPENAI_IMAGE_GENERATION"
          ],
          "description": "ImageGeneration using Azure OpenAI. (Note: The alias 'SERVICE_AZURE_OPENAI_IMAGE_GENERATION' is deprecated; use 'AZURE_OPENAI_IMAGE_GENERATION' instead.)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'AZURE_OPENAI_IMAGE_GENERATION'."
        },
        "model": {
          "type": "string",
          "description": "Image model to use (gpt-image-1, dall-e-3, dall-e-2)",
          "enum": [
            "gpt-image-1",
            "dall-e-3",
            "dall-e-2"
          ]
        },
        "instruction": {
          "type": "string",
          "description": "The user prompt to send to the LLM (supports templates)"
        },
        "size": {
          "type": "string",
          "description": "Desired image size (model-dependent)"
        },
        "quality": {
          "type": "string",
          "description": "Image quality setting"
        },
        "n": {
          "type": "number",
          "description": "Number of images to generate (1-4; DALL·E 3 supports only 1)"
        },
        "style": {
          "type": "string",
          "description": "Image style for supported models (e.g. natural, vivid)"
        },
        "response_format": {
          "type": "string",
          "description": "DALL·E response format: 'url' or 'b64_json' (gpt-image-1 returns b64_json)",
          "enum": [
            "url",
            "b64_json"
          ]
        },
        "output_format": {
          "type": "string",
          "description": "Output format for gpt-image-1 (png|jpeg|webp)",
          "enum": [
            "png",
            "jpeg",
            "webp"
          ]
        },
        "background": {
          "type": "string",
          "description": "Background setting (e.g. transparent)"
        },
        "seed": {
          "type": "number",
          "description": "Random seed for reproducible results"
        },
        "user": {
          "type": "string",
          "description": "User identifier for attribution and policy monitoring"
        },
        "timeoutMs": {
          "type": "number",
          "description": "Request timeout in milliseconds"
        },
        "maxRetries": {
          "type": "number",
          "description": "Maximum number of retries for recoverable errors"
        },
        "outputKey": {
          "type": "string",
          "description": "FlowContext key where images will be stored (default: imageList)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Prompt text input. If the step reads text input, use a preceding SET step to provide it (e.g. - step: SET text: \"...\" )."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageList",
                    "description": "Array of generated image items (each item contains url or b64_json) stored under the outputKey or default key"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Optional metadata about the generation (model, size, attemptCount)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_SPEECH": {
      "type": "object",
      "description": "Generate speech audio using the OpenAI Speech API (or corresponding Hub/Azure variants). Reads text from the default text input and returns an audio blob.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_SPEECH",
            "SERVICE_OPENAI_SPEECH"
          ],
          "description": "Generate speech audio using the OpenAI Speech API (or corresponding Hub/Azure variants). Reads text from the default text input and returns an audio blob. (Note: Aliases SERVICE_OPENAI_SPEECH are deprecated, use 'OPENAI_SPEECH' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_SPEECH'."
        },
        "model": {
          "type": "string",
          "description": "Model id to use for TTS (e.g. tts-1)"
        },
        "voice": {
          "type": "string",
          "description": "Voice id to use for synthesis"
        },
        "response_format": {
          "type": "string",
          "description": "Response audio format (e.g. mp3, wav, pcm)"
        },
        "speed": {
          "type": "number",
          "description": "Playback speed multiplier (e.g. 1.0)"
        },
        "mime_type": {
          "type": "string",
          "description": "Optional MIME type for the produced blob (e.g. audio/mpeg)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text to synthesize. If omitted, the default text input is used."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Generated audio blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_SPEECH": {
      "type": "object",
      "description": "ProActionsHub-compatible speech generation step. Inherits behavior from OPENAI_SPEECH.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_SPEECH"
          ],
          "description": "ProActionsHub-compatible speech generation step. Inherits behavior from OPENAI_SPEECH."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "model": {
          "type": "string",
          "description": "Model id to use for TTS (e.g. tts-1)"
        },
        "voice": {
          "type": "string",
          "description": "Voice id to use for synthesis"
        },
        "response_format": {
          "type": "string",
          "description": "Response audio format (e.g. mp3, wav, pcm)"
        },
        "speed": {
          "type": "number",
          "description": "Playback speed multiplier (e.g. 1.0)"
        },
        "mime_type": {
          "type": "string",
          "description": "Optional MIME type for the produced blob (e.g. audio/mpeg)"
        },
        "target": {
          "type": "string",
          "description": "Override the target used on service configuration level."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text to synthesize. If omitted, the default text input is used."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Generated audio blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "AZURE_OPENAI_SPEECH": {
      "type": "object",
      "description": "Azure OpenAI speech step alias.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "AZURE_OPENAI_SPEECH",
            "SERVICE_AZURE_OPENAI_SPEECH"
          ],
          "description": "Azure OpenAI speech step alias. (Note: Aliases SERVICE_AZURE_OPENAI_SPEECH are deprecated, use 'AZURE_OPENAI_SPEECH' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'AZURE_OPENAI_SPEECH'."
        },
        "model": {
          "type": "string",
          "description": "Model id to use for TTS (e.g. tts-1)"
        },
        "voice": {
          "type": "string",
          "description": "Voice id to use for synthesis"
        },
        "response_format": {
          "type": "string",
          "description": "Response audio format (e.g. mp3, wav, pcm)"
        },
        "speed": {
          "type": "number",
          "description": "Playback speed multiplier (e.g. 1.0)"
        },
        "mime_type": {
          "type": "string",
          "description": "Optional MIME type for the produced blob (e.g. audio/mpeg)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Text to synthesize. If omitted, the default text input is used."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Generated audio blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "OPENAI_TRANSCRIPTION": {
      "type": "object",
      "description": "Transcribe audio using OpenAI (or Azure/Hub variants). Reads an audio file/blob from inputs and writes the transcription text and optional segment list.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "OPENAI_TRANSCRIPTION",
            "SERVICE_OPENAI_TRANSCRIPTION"
          ],
          "description": "Transcribe audio using OpenAI (or Azure/Hub variants). Reads an audio file/blob from inputs and writes the transcription text and optional segment list. (Note: Aliases SERVICE_OPENAI_TRANSCRIPTION are deprecated, use 'OPENAI_TRANSCRIPTION' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'OPENAI_TRANSCRIPTION'."
        },
        "instruction": {
          "type": "string",
          "description": "Optional prompt/instruction to bias transcription."
        },
        "language": {
          "type": "string",
          "description": "Optional language code to hint at language for transcription."
        },
        "temperature": {
          "type": "number",
          "description": "Optional temperature / randomness parameter for transcription model."
        },
        "model": {
          "type": "string",
          "description": "Optional model override (e.g. 'whisper-1')."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "file",
                "description": "Audio file or Blob to transcribe"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "The full transcribed text"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "Optional list of segment texts"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "HUB_TRANSCRIPTION": {
      "type": "object",
      "description": "Hub-compatible transcription step (delegates to hub service). Writes transcription text and optional segments list.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HUB_TRANSCRIPTION"
          ],
          "description": "Hub-compatible transcription step (delegates to hub service). Writes transcription text and optional segments list."
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'HUB'."
        },
        "instruction": {
          "type": "string",
          "description": "Optional prompt/instruction to bias transcription."
        },
        "language": {
          "type": "string",
          "description": "Optional language code to hint at language for transcription."
        },
        "temperature": {
          "type": "number",
          "description": "Optional temperature / randomness parameter for transcription model."
        },
        "model": {
          "type": "string",
          "description": "Optional model override (e.g. 'whisper-1')."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "file",
                "description": "Audio file or Blob to transcribe"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "The full transcribed text"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "Optional list of segment texts"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "AZURE_OPENAI_TRANSCRIPTION": {
      "type": "object",
      "description": "Azure OpenAI transcription alias (delegates to the OpenAI transcription implementation).",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "AZURE_OPENAI_TRANSCRIPTION",
            "SERVICE_AZURE_OPENAI_TRANSCRIPTION"
          ],
          "description": "Azure OpenAI transcription alias (delegates to the OpenAI transcription implementation). (Note: Aliases SERVICE_AZURE_OPENAI_TRANSCRIPTION are deprecated, use 'AZURE_OPENAI_TRANSCRIPTION' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'AZURE_OPENAI_TRANSCRIPTION'."
        },
        "instruction": {
          "type": "string",
          "description": "Optional prompt/instruction to bias transcription."
        },
        "language": {
          "type": "string",
          "description": "Optional language code to hint at language for transcription."
        },
        "temperature": {
          "type": "number",
          "description": "Optional temperature / randomness parameter for transcription model."
        },
        "model": {
          "type": "string",
          "description": "Optional model override (e.g. 'whisper-1')."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "file",
                "description": "Audio file or Blob to transcribe"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "The full transcribed text"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "list",
                    "description": "Optional list of segment texts"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "REST": {
      "type": "object",
      "description": "Generic REST step. Builds a Request from the given configuration (url, method, headers, parameters, body or formData) and executes it using the host HTTP helper. Results are written to cfg.outputs when provided or to the default text output.",
      "required": [
        "step",
        "url"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "REST",
            "SERVICE_REST"
          ],
          "description": "Generic REST step. Builds a Request from the given configuration (url, method, headers, parameters, body or formData) and executes it using the host HTTP helper. Results are written to cfg.outputs when provided or to the default text output. (Note: Aliases SERVICE_REST are deprecated, use 'REST' instead)"
        },
        "url": {
          "type": "string",
          "description": "The URL to call. Supports template expressions and variable resolution."
        },
        "method": {
          "type": "string",
          "description": "HTTP method to use (GET, POST, PUT, DELETE, ...). Default: GET.",
          "enum": [
            "GET",
            "POST",
            "PUT",
            "DELETE",
            "PATCH",
            "HEAD",
            "OPTIONS"
          ],
          "default": "GET"
        },
        "headers": {
          "type": "object",
          "description": "Optional headers object. Values will be resolved against the flow context."
        },
        "parameters": {
          "type": "object",
          "description": "Optional query parameters object. Values will be resolved against the flow context."
        },
        "body": {
          "description": "Optional request body. May be a string or object. Objects are JSON-stringified by the step.",
          "oneOf": [
            {
              "type": "string"
            },
            {
              "type": "object"
            }
          ]
        },
        "formData": {
          "type": "object",
          "description": "Optional formData object. Supports File/Blob entries and arrays. Values are resolved against the flow context."
        },
        "requestOptions": {
          "type": "object",
          "description": "Optional low-level RequestInit overrides passed to the fetch helper."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "Default textual response when no outputs are configured"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "json",
                    "description": "Parsed JSON response (if requested via outputs)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Binary blob response (if requested via outputs)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "bytes",
                    "description": "Raw bytes as Uint8Array (if requested via outputs)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "arrayBuffer",
                    "description": "ArrayBuffer response (if requested via outputs)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "object",
                    "description": "Full Response object (if requested via outputs)"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "STABILITY_AI_UPSCALE": {
      "type": "object",
      "description": "Upscale an image using StabilityAI upscale endpoint. Uploads a provided image blob and returns the upscaled image blob.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "STABILITY_AI_UPSCALE",
            "SERVICE_STABILITY_AI_UPSCALE"
          ],
          "description": "Upscale an image using StabilityAI upscale endpoint. Uploads a provided image blob and returns the upscaled image blob. (Note: Aliases SERVICE_STABILITY_AI_UPSCALE are deprecated, use 'STABILITY_AI_UPSCALE' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'STABILITY_AI'."
        },
        "prompt": {
          "type": "string",
          "description": "Optional prompt describing desired changes to the image"
        },
        "output_format": {
          "type": "string",
          "description": "Desired output format (e.g. jpeg, png)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Input image blob to upscale"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageName",
                    "description": "Input image name"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Upscaled image blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "STABILITY_AI_OUTPAINT": {
      "type": "object",
      "description": "Outpaint an image region using StabilityAI. Supports coordinates (left,right,up,down) to define the outpainting area.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "STABILITY_AI_OUTPAINT",
            "SERVICE_STABILITY_AI_OUTPAINT"
          ],
          "description": "Outpaint an image region using StabilityAI. Supports coordinates (left,right,up,down) to define the outpainting area. (Note: Aliases SERVICE_STABILITY_AI_OUTPAINT are deprecated, use 'STABILITY_AI_OUTPAINT' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'STABILITY_AI'."
        },
        "left": {
          "type": "number",
          "description": "Pixels to the left (default 0)"
        },
        "right": {
          "type": "number",
          "description": "Pixels to the right (default 0)"
        },
        "up": {
          "type": "number",
          "description": "Pixels to the top (default 0)"
        },
        "down": {
          "type": "number",
          "description": "Pixels to the bottom (default 0)"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Input image blob to outpaint"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageName",
                    "description": "Input image name"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Outpainted image blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "STABILITY_AI_SEARCH_AND_REPLACE": {
      "type": "object",
      "description": "Search and replace regions or objects within an image using StabilityAI. Provide a prompt and a search_prompt to locate and replace visual elements.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "STABILITY_AI_SEARCH_AND_REPLACE",
            "SERVICE_STABILITY_AI_SEARCH_AND_REPLACE"
          ],
          "description": "Search and replace regions or objects within an image using StabilityAI. Provide a prompt and a search_prompt to locate and replace visual elements. (Note: The alias SERVICE_STABILITY_AI_SEARCH_AND_REPLACE is deprecated, use 'STABILITY_AI_SEARCH_AND_REPLACE' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'STABILITY_AI'."
        },
        "prompt": {
          "type": "string",
          "description": "Prompt describing replacement target or desired result"
        },
        "search_prompt": {
          "type": "string",
          "description": "Prompt describing what to search for in the image"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Input image blob to process"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageName",
                    "description": "Input image name"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Processed image blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "STABILITY_AI_SEARCH_AND_RECOLOR": {
      "type": "object",
      "description": "Search and recolor elements in an image using StabilityAI. Provide select_prompt and prompt to find and recolor elements.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "STABILITY_AI_SEARCH_AND_RECOLOR",
            "SERVICE_STABILITY_AI_SEARCH_AND_RECOLOR"
          ],
          "description": "Search and recolor elements in an image using StabilityAI. Provide select_prompt and prompt to find and recolor elements. (Note: The alias SERVICE_STABILITY_AI_SEARCH_AND_RECOLOR is deprecated, use 'STABILITY_AI_SEARCH_AND_RECOLOR' instead)"
        },
        "service": {
          "type": "string",
          "description": "Optional service name override. If not specified, defaults to 'STABILITY_AI'."
        },
        "select_prompt": {
          "type": "string",
          "description": "Prompt to select elements to recolor"
        },
        "prompt": {
          "type": "string",
          "description": "Prompt describing the recoloring operation"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "blob",
                    "description": "Input image blob to recolor"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "imageName",
                    "description": "Input image name"
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to read the input value from"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "blob",
                "description": "Recolored image blob"
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "READ_CLIPBOARD": {
      "type": "object",
      "description": "Reads content from the user clipboard. Prefers HTML content when available, falls back to plain text.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "READ_CLIPBOARD"
          ],
          "description": "Reads content from the user clipboard. Prefers HTML content when available, falls back to plain text."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "The clipboard content (HTML or plain text) stored in the default text output."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "WRITE_CLIPBOARD": {
      "type": "object",
      "description": "Writes text to the user clipboard. Reads the content from the configured input (cfg.text) or from the default text input in the flow context.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "WRITE_CLIPBOARD"
          ],
          "description": "Writes text to the user clipboard. Reads the content from the configured input (cfg.text) or from the default text input in the flow context."
        },
        "text": {
          "type": "string",
          "description": "Text to write to clipboard. If omitted, uses the default text input from flow context."
        }
      },
      "additionalProperties": false
    },
    "DOWNLOAD": {
      "type": "object",
      "description": "Downloads a file constructed from text or an existing Blob. The filename is required.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "DOWNLOAD",
            "DOWNLOAD_TEXT"
          ],
          "description": "Downloads a file constructed from text or an existing Blob. The filename is required. (Note: The alias DOWNLOAD_TEXT is deprecated, use 'DOWNLOAD' instead)"
        },
        "filename": {
          "type": "string",
          "description": "Name of the file to download (required)."
        },
        "contentType": {
          "type": "string",
          "description": "The content type (MIME type) to use for the downloaded file.",
          "default": "text/plain"
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "filename",
                "description": "The filename to be used. Deprecated: prefer the top-level 'filename' configuration property instead."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "FILE_UPLOAD": {
      "type": "object",
      "description": "Opens a file upload dialog (modal) and stores selected file(s) into the flow context. Supports output mapping via the `outputs` config or default file output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "FILE_UPLOAD",
            "USER_FILE_UPLOAD"
          ],
          "description": "Opens a file upload dialog (modal) and stores selected file(s) into the flow context. Supports output mapping via the `outputs` config or default file output. (Note: The alias USER_FILE_UPLOAD is deprecated, use 'FILE_UPLOAD' instead)"
        },
        "promptText": {
          "type": "string",
          "description": "Prompt text shown in the upload dialog."
        },
        "accept": {
          "type": "string",
          "description": "Comma-separated list of accepted MIME types for upload (e.g. \"image/png,application/pdf\")."
        },
        "fileTypes": {
          "type": "array",
          "description": "Array of accepted MIME types for the upload (e.g. [\"audio/mp3\",\"audio/wav\"])."
        },
        "maxSize": {
          "type": "number",
          "description": "Maximum file size allowed in bytes."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "oneOf": [
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "file",
                    "description": "Default output: the uploaded File object (when no explicit outputs are configured)."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "text",
                    "description": "If configured, file content can be read and stored as text in the configured output name."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              },
              {
                "type": "object",
                "required": [
                  "type",
                  "name"
                ],
                "properties": {
                  "type": {
                    "type": "string",
                    "const": "arrayBuffer",
                    "description": "If configured, file content can be read as an ArrayBuffer into the configured output name."
                  },
                  "name": {
                    "type": "string",
                    "description": "Flow context key to store the output value in"
                  }
                },
                "additionalProperties": false
              }
            ]
          }
        }
      },
      "additionalProperties": false
    },
    "PROMPT": {
      "type": "object",
      "description": "Shows a simple prompt to the user and stores the entered text into the default text output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "PROMPT",
            "USER_PROMPT"
          ],
          "description": "Shows a simple prompt to the user and stores the entered text into the default text output. (Note: The alias USER_PROMPT is deprecated, use 'PROMPT' instead)"
        },
        "promptText": {
          "type": "string",
          "description": "Text displayed in the prompt. Can contain template placeholders resolved against the flowContext."
        },
        "placeholder": {
          "type": "string",
          "description": "Optional placeholder shown inside the input field."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "The text entered by the user is written to the default text output (or the configured output variable)."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "FORM": {
      "type": "object",
      "description": "Displays a configurable form to the user (via FormBuilder). The form definition is provided in `cfg.form`. When submitted, the returned values are merged into the flowContext.",
      "required": [
        "step",
        "form"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "FORM"
          ],
          "description": "Displays a configurable form to the user (via FormBuilder). The form definition is provided in `cfg.form`. When submitted, the returned values are merged into the flowContext."
        },
        "title": {
          "type": "string",
          "description": "Title of the form modal."
        },
        "form": {
          "type": "object",
          "description": "Form field definitions. Each key is the field name, and the value is a FormComponent object defining the field type and properties. See the comprehensive form documentation for all available field types and properties.",
          "additionalProperties": {
            "oneOf": [
              {
                "$ref": "https://catalog.lintel.tools/schemas/schemastore/proactions-ai-kit-configuration-for-templates/_shared/latest--partial-step.form.schema.json#/$defs/formComponent"
              },
              {
                "$ref": "https://catalog.lintel.tools/schemas/schemastore/proactions-ai-kit-configuration-for-templates/_shared/latest--partial-step.form.schema.json#/$defs/formGroup"
              }
            ]
          }
        },
        "buttons": {
          "type": "array",
          "description": "Optional buttons configuration for the form modal. Array of button objects defining submit/cancel buttons.",
          "minItems": 1,
          "maxItems": 10,
          "items": {
            "type": "object",
            "description": "Form button configuration",
            "properties": {
              "type": {
                "type": "string",
                "description": "Button type",
                "enum": [
                  "submit",
                  "cancel"
                ]
              },
              "label": {
                "type": "string",
                "description": "Button label text"
              },
              "name": {
                "type": "string",
                "description": "Button name attribute (used when type is submit)"
              },
              "title": {
                "type": "string",
                "description": "Button title attribute (tooltip)"
              },
              "class": {
                "type": "string",
                "description": "CSS class for button styling (e.g., \"btn btn-primary\")"
              },
              "flowAttribute": {
                "type": "string",
                "description": "Flow context variable name to store the button name/value when clicked"
              }
            },
            "required": [
              "type"
            ]
          }
        },
        "formConfig": {
          "type": "object",
          "description": "Optional modal-level configuration (width, height, fullScreen, typography). See FormModalConfig for all options.",
          "properties": {
            "dialogSize": {
              "type": "string",
              "description": "Bootstrap dialog size: \"sm\", \"md\", \"lg\", \"xl\""
            },
            "width": {
              "type": "string",
              "description": "Modal width (e.g., \"720px\", \"80vw\")"
            },
            "maxWidth": {
              "type": "string",
              "description": "Maximum modal width"
            },
            "height": {
              "type": "string",
              "description": "Modal height (e.g., \"600px\", \"80vh\")"
            },
            "maxHeight": {
              "type": "string",
              "description": "Maximum modal height"
            },
            "fullScreen": {
              "type": "boolean",
              "description": "Force modal to cover entire viewport"
            },
            "bodyFontSize": {
              "type": "string",
              "description": "Font size for modal body"
            },
            "bodyLineHeight": {
              "type": "string",
              "description": "Line height for modal body"
            },
            "labelFontSize": {
              "type": "string",
              "description": "Font size for form labels"
            },
            "inputFontSize": {
              "type": "string",
              "description": "Font size for form inputs"
            },
            "inputLineHeight": {
              "type": "string",
              "description": "Line height for form inputs"
            },
            "diffFontSize": {
              "type": "string",
              "description": "Font size for diff components"
            },
            "diffLineHeight": {
              "type": "string",
              "description": "Line height for diff components"
            },
            "modalClass": {
              "type": "string",
              "description": "Additional CSS class for modal"
            }
          }
        },
        "inlineSteps": {
          "type": "array",
          "description": "Optional steps to execute before showing the form while an in-place loading UI is shown.",
          "items": {
            "$ref": "#"
          }
        }
      },
      "additionalProperties": false
    },
    "PLAY_AUDIO": {
      "type": "object",
      "description": "Plays audio in a lightweight floating player. Audio source can be provided as a data URL, http(s) URL, or raw base64 via `cfg.in` or via the default text input. The step mounts a closed Shadow DOM audio player and resolves when the player is closed by the user.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "PLAY_AUDIO"
          ],
          "description": "Plays audio in a lightweight floating player. Audio source can be provided as a data URL, http(s) URL, or raw base64 via `cfg.in` or via the default text input. The step mounts a closed Shadow DOM audio player and resolves when the player is closed by the user."
        },
        "in": {
          "type": "string",
          "description": "Optional input path or variable name containing the audio source (data URL, http(s) URL or raw base64). If omitted, the default text input is used."
        },
        "autoplay": {
          "type": "boolean",
          "description": "Attempt autoplay on load. Note: browsers may block autoplay; in that case the player will show a hint and wait for user interaction.",
          "default": true
        },
        "showProgress": {
          "type": "boolean",
          "description": "Show progress / seek bar in the player.",
          "default": true
        },
        "showVolume": {
          "type": "boolean",
          "description": "Show volume controls in the player.",
          "default": true
        },
        "showTimeDisplay": {
          "type": "boolean",
          "description": "Show current time and duration labels.",
          "default": true
        },
        "theme": {
          "type": "string",
          "description": "Theme of the player: 'dark' | 'light' or any valid CSS color string.",
          "default": "dark"
        },
        "position": {
          "type": "string",
          "description": "Player anchoring position: 'top-left' | 'top-right' | 'bottom-left' | 'bottom-right' | 'top-center' | 'bottom-center' | 'center'.",
          "default": "center"
        },
        "loop": {
          "type": "boolean",
          "description": "If true, loop playback.",
          "default": false
        },
        "title": {
          "type": "string",
          "description": "Optional title shown in the player title bar."
        },
        "mimeType": {
          "type": "string",
          "description": "Optional MIME type hint used when the provided input is raw base64 without a data URL header (e.g. \"audio/mpeg\")."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "Audio source as data URL, http(s) URL, or raw base64. If omitted, reads from `in` config or default text input."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "USER_SELECT": {
      "type": "object",
      "description": "Displays a selection modal allowing the user to pick one item from a list. The list may be provided directly or produced by a pre-processing step (inlineSteps). The selected value is written to the default text output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "USER_SELECT"
          ],
          "description": "Displays a selection modal allowing the user to pick one item from a list. The list may be provided directly or produced by a pre-processing step (inlineSteps). The selected value is written to the default text output."
        },
        "inlineSteps": {
          "type": "array",
          "description": "Optional steps to execute before showing the selection modal. Useful to prepare or transform input.",
          "items": {
            "$ref": "#"
          }
        },
        "promptText": {
          "type": "string",
          "description": "Prompt or title text shown at the top of the selection modal (supports variable resolution)."
        },
        "infoTitle": {
          "type": "string",
          "description": "Optional title shown above additional info in the selection modal."
        },
        "infoText": {
          "type": "string",
          "description": "Optional descriptive text shown in the selection modal (supports variable resolution)."
        },
        "enableKeyboardControl": {
          "type": "boolean",
          "description": "Enable keyboard navigation in the selection modal."
        },
        "inputs": {
          "type": "array",
          "description": "Input mappings from flow context to step inputs",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "list",
                "description": "List of options for the user to select from. If omitted, converts text input to a list automatically."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to read the input value from"
              }
            },
            "additionalProperties": false
          }
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "text",
                "description": "The selected option (string) stored in the default text output."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "IMAGE_PICKER": {
      "type": "object",
      "description": "Shows an image picker modal allowing the user to pick one image from a provided list of image objects ({ url, previewUrl?, label? }). The selected image object is stored as an output.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "IMAGE_PICKER",
            "USER_IMAGE_PICKER"
          ],
          "description": "Shows an image picker modal allowing the user to pick one image from a provided list of image objects ({ url, previewUrl?, label? }). The selected image object is stored as an output. (Note: The alias USER_IMAGE_PICKER is deprecated, use 'IMAGE_PICKER' instead)"
        },
        "promptText": {
          "type": "string",
          "description": "Prompt text shown in the image picker modal."
        },
        "imageList": {
          "type": "array",
          "description": "List of image objects to show. Each item should contain at least a `url` and optionally `previewUrl` and `label`."
        },
        "outputs": {
          "type": "array",
          "description": "Output mappings from step outputs to flow context",
          "items": {
            "type": "object",
            "required": [
              "type",
              "name"
            ],
            "properties": {
              "type": {
                "type": "string",
                "const": "image",
                "description": "Selected image object (includes url, previewUrl?, label?)."
              },
              "name": {
                "type": "string",
                "description": "Flow context key to store the output value in"
              }
            },
            "additionalProperties": false
          }
        }
      },
      "additionalProperties": false
    },
    "SHOW_PROGRESS": {
      "type": "object",
      "description": "Show a progress bar with optional status text. Use `position` to place it and `autoHide` to auto-hide after completion.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SHOW_PROGRESS"
          ],
          "description": "Show a progress bar with optional status text. Use `position` to place it and `autoHide` to auto-hide after completion."
        },
        "status": {
          "type": "string",
          "description": "Status text to display next to the progress bar. Supports variable resolution."
        },
        "position": {
          "type": "string",
          "description": "Position of the progress bar: 'bottom', 'top', 'left', or 'right'.",
          "enum": [
            "bottom",
            "top",
            "left",
            "right"
          ]
        },
        "autoHide": {
          "type": "boolean",
          "description": "Whether the progress bar should auto-hide when completed."
        }
      },
      "additionalProperties": false
    },
    "UPDATE_PROGRESS": {
      "type": "object",
      "description": "Update an existing progress bar. Provide percentage (0-100) and optional status text.",
      "required": [
        "step",
        "percentage"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "UPDATE_PROGRESS"
          ],
          "description": "Update an existing progress bar. Provide percentage (0-100) and optional status text."
        },
        "status": {
          "type": "string",
          "description": "Status text to update on the progress bar. Supports variable resolution."
        },
        "percentage": {
          "description": "Progress percentage (0-100). Can be a number or a string that resolves to a number.",
          "oneOf": [
            {
              "type": "number"
            },
            {
              "type": "string"
            }
          ]
        }
      },
      "additionalProperties": false
    },
    "HIDE_PROGRESS": {
      "type": "object",
      "description": "Hide any shown progress bar.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "HIDE_PROGRESS"
          ],
          "description": "Hide any shown progress bar."
        }
      },
      "additionalProperties": false
    },
    "SLEEP": {
      "type": "object",
      "description": "Pause execution for a given delay (ms). Useful for pacing flows or waiting for external state changes.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "SLEEP"
          ],
          "description": "Pause execution for a given delay (ms). Useful for pacing flows or waiting for external state changes."
        },
        "delay": {
          "type": "number",
          "description": "Delay in milliseconds to sleep. Defaults to 1000 ms."
        }
      },
      "additionalProperties": false
    },
    "CLEAR_SELECTION": {
      "type": "object",
      "description": "Clears the current text selection and optionally repositions the cursor to the start or end of the selection. Available in Swing editor context only.",
      "required": [
        "step"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "CLEAR_SELECTION"
          ],
          "description": "Clears the current text selection and optionally repositions the cursor to the start or end of the selection. Available in Swing editor context only."
        },
        "cursorPosition": {
          "type": "string",
          "description": "Where to move the cursor after clearing selection. Use \"anchorStart\" to move to the start or \"anchorEnd\" to move to the end.",
          "enum": [
            "anchorStart",
            "anchorEnd"
          ]
        }
      },
      "additionalProperties": false
    },
    "CHANGE_VIEW_SIZE": {
      "type": "object",
      "description": "Requests a change of the view port size. Useful to resize the command palette view in Prime 8.",
      "required": [
        "step",
        "width",
        "height"
      ],
      "properties": {
        "step": {
          "type": "string",
          "enum": [
            "CHANGE_VIEW_SIZE"
          ],
          "description": "Requests a change of the view port size. Useful to resize the command palette view in Prime 8."
        },
        "width": {
          "type": "string",
          "description": "Target width in pixels. Can be a number or an expression resolved against the flowContext."
        },
        "height": {
          "type": "string",
          "description": "Target height in pixels. Can be a number or an expression resolved against the flowContext."
        }
      },
      "additionalProperties": false
    }
  },
  "discriminator": {
    "propertyName": "step"
  },
  "required": [
    "step"
  ]
}
