-
- Trigger
-
- {log.trigger ? (
-
- ) : (
-
- —
-
+ {/* Details Section */}
+
+ {/* Run ID — click to copy */}
+ {log.executionId && (
+
{
+ navigator.clipboard.writeText(log.executionId!)
+ if (copiedRunIdTimerRef.current) clearTimeout(copiedRunIdTimerRef.current)
+ setCopiedRunId(true)
+ copiedRunIdTimerRef.current = window.setTimeout(
+ () => setCopiedRunId(false),
+ 1500
+ )
+ }}
+ >
+
+ Run ID
+
+
+ {copiedRunId ? 'Copied!' : log.executionId}
+
+
)}
-
- {/* Duration */}
-
-
- Duration
-
-
- {formatDuration(log.duration, { precision: 2 }) || '—'}
-
-
+ {/* Level */}
+
+
+ Level
+
+
+
- {/* Version */}
- {log.deploymentVersion && (
-
-
- Version
+ {/* Trigger */}
+
+
+ Trigger
-
-
- {log.deploymentVersionName || `v${log.deploymentVersion}`}
+ {log.trigger ? (
+
+ ) : (
+
+ —
-
+ )}
- )}
-
- {/* Workflow State */}
- {isWorkflowExecutionLog &&
- log.executionId &&
- log.trigger !== 'mothership' &&
- !permissionConfig.hideTraceSpans && (
-
+ {/* Duration */}
+
- Workflow State
+ Duration
+
+
+ {formatDuration(log.duration, { precision: 2 }) || '—'}
- setIsExecutionSnapshotOpen(true)}
- className='flex w-full items-center justify-between px-2.5 py-1.5'
- >
- View Snapshot
-
-
- )}
- {/* Workflow Output */}
- {isWorkflowExecutionLog && workflowOutput && !permissionConfig.hideTraceSpans && (
-
-
- Workflow Output
-
-
+ {/* Version */}
+ {log.deploymentVersion && (
+
+
+ Version
+
+
+
+ {log.deploymentVersionName || `v${log.deploymentVersion}`}
+
+
+
+ )}
+
+ {/* Snapshot */}
+ {showWorkflowState && (
+
+
+ Snapshot
+
+ setIsExecutionSnapshotOpen(true)}
+ >
+
+ View Snapshot
+
+
+ )}
- )}
- {/* Workflow Execution - Trace Spans */}
- {isWorkflowExecutionLog &&
- log.executionData?.traceSpans &&
- !permissionConfig.hideTraceSpans && (
-
+ {/* Workflow Input */}
+ {isWorkflowExecutionLog && workflowInput && !permissionConfig.hideTraceSpans && (
+
- Trace Span
+ Workflow Input
-
+
)}
- {/* Files */}
- {log.files && log.files.length > 0 && (
-
- )}
-
- {/* Cost Breakdown */}
- {hasCostInfo && (
-
-
- Cost Breakdown
-
-
-
-
-
-
- Base Run:
-
-
- {formatCost(BASE_EXECUTION_CHARGE)}
-
-
-
-
- Model Input:
-
-
- {formatCost(log.cost?.input || 0)}
-
-
-
-
- Model Output:
-
-
- {formatCost(log.cost?.output || 0)}
-
-
- {(() => {
- const models = (log.cost as Record
)?.models as
- | Record
- | undefined
- const totalToolCost = models
- ? Object.values(models).reduce((sum, m) => sum + (m?.toolCost || 0), 0)
- : 0
- return totalToolCost > 0 ? (
-
-
- Tool Usage:
-
-
- {formatCost(totalToolCost)}
-
-
- ) : null
- })()}
-
+ {/* Workflow Output */}
+ {isWorkflowExecutionLog && workflowOutput && !permissionConfig.hideTraceSpans && (
+
+
+ Workflow Output
+
+
+
+ )}
-
+ {/* Files */}
+ {log.files && log.files.length > 0 && (
+
+ )}
-
-
-
- Total:
-
-
- {formatCost(log.cost?.total || 0)}
-
-
-
-
- Tokens:
-
-
- {log.cost?.tokens?.input || log.cost?.tokens?.prompt || 0} in /{' '}
- {log.cost?.tokens?.output || log.cost?.tokens?.completion || 0} out
-
-
+ {/* Cost Breakdown */}
+ {hasCostInfo && (
+
+
+
+ Base Run
+
+
+ {formatCost(BASE_EXECUTION_CHARGE)}
+
+
+
+
+ Model Input
+
+
+ {formatCost(log.cost?.input || 0)}
+
+
+
+
+ Model Output
+
+
+ {formatCost(log.cost?.output || 0)}
+
+
+ {(() => {
+ const models = (log.cost as Record
)?.models as
+ | Record
+ | undefined
+ const totalToolCost = models
+ ? Object.values(models).reduce((sum, m) => sum + (m?.toolCost || 0), 0)
+ : 0
+ return totalToolCost > 0 ? (
+
+
+ Tool Usage
+
+
+ {formatCost(totalToolCost)}
+
+
+ ) : null
+ })()}
+
+
+ Total
+
+
+ {formatCost(log.cost?.total || 0)}
+
+
+
+
+ Tokens
+
+
+ {log.cost?.tokens?.input || log.cost?.tokens?.prompt || 0} in ·{' '}
+ {log.cost?.tokens?.output || log.cost?.tokens?.completion || 0} out
+
+
+
+
+ Total includes a {formatCost(BASE_EXECUTION_CHARGE)} base charge plus
+ model and tool usage.
+
+ )}
+
+
-
-
- Total cost includes a base run charge of {formatCost(BASE_EXECUTION_CHARGE)}{' '}
- plus any model and tool usage costs.
-
-
-
- )}
-
-
+ {/* Trace Tab */}
+ {showTraceTab && log.executionData?.traceSpans && (
+
+
+
+ )}
+
)}
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx
index a9dba9f471d..01b867e25e7 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx
@@ -8,7 +8,7 @@ import {
DropdownMenuSeparator,
DropdownMenuTrigger,
} from '@/components/emcn'
-import { Copy, Eye, Link, ListFilter, SquareArrowUpRight, X } from '@/components/emcn/icons'
+import { Copy, Eye, Link, ListFilter, Redo, SquareArrowUpRight, X } from '@/components/emcn/icons'
import type { WorkflowLog } from '@/stores/logs/filters/types'
interface LogRowContextMenuProps {
@@ -23,6 +23,8 @@ interface LogRowContextMenuProps {
onToggleWorkflowFilter: () => void
onClearAllFilters: () => void
onCancelExecution: () => void
+ onRetryExecution: () => void
+ isRetryPending?: boolean
isFilteredByThisWorkflow: boolean
hasActiveFilters: boolean
}
@@ -43,6 +45,8 @@ export const LogRowContextMenu = memo(function LogRowContextMenu({
onToggleWorkflowFilter,
onClearAllFilters,
onCancelExecution,
+ onRetryExecution,
+ isRetryPending = false,
isFilteredByThisWorkflow,
hasActiveFilters,
}: LogRowContextMenuProps) {
@@ -50,6 +54,7 @@ export const LogRowContextMenu = memo(function LogRowContextMenu({
const hasWorkflow = Boolean(log?.workflow?.id || log?.workflowId)
const isCancellable =
(log?.status === 'running' || log?.status === 'pending') && hasExecutionId && hasWorkflow
+ const isRetryable = log?.status === 'failed' && hasWorkflow
return (
!open && onClose()} modal={false}>
@@ -73,6 +78,15 @@ export const LogRowContextMenu = memo(function LogRowContextMenu({
sideOffset={4}
onCloseAutoFocus={(e) => e.preventDefault()}
>
+ {isRetryable && (
+ <>
+
+
+ {isRetryPending ? 'Retrying...' : 'Retry'}
+
+
+ >
+ )}
{isCancellable && (
<>
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx
index b895f447b57..cbaed69a90b 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx
@@ -248,7 +248,7 @@ export function AutocompleteSearch({
([])
const selectedLogIndexRef = useRef(-1)
const selectedLogIdRef = useRef(null)
+ const shouldScrollIntoViewRef = useRef(false)
const logsRefetchRef = useRef<() => void>(() => {})
const activeLogRefetchRef = useRef<() => void>(() => {})
const logsQueryRef = useRef({ isFetching: false, hasNextPage: false, fetchNextPage: () => {} })
@@ -462,6 +468,7 @@ export default function Logs() {
const idx = selectedLogIndexRef.current
const currentLogs = logsRef.current
if (idx < currentLogs.length - 1) {
+ shouldScrollIntoViewRef.current = true
dispatch({ type: 'SELECT_LOG', logId: currentLogs[idx + 1].id })
}
}, [])
@@ -469,6 +476,7 @@ export default function Logs() {
const handleNavigatePrev = useCallback(() => {
const idx = selectedLogIndexRef.current
if (idx > 0) {
+ shouldScrollIntoViewRef.current = true
dispatch({ type: 'SELECT_LOG', logId: logsRef.current[idx - 1].id })
}
}, [])
@@ -532,6 +540,7 @@ export default function Logs() {
}, [contextMenuLog])
const cancelExecution = useCancelExecution()
+ const retryExecution = useRetryExecution()
const handleCancelExecution = useCallback(() => {
const workflowId = contextMenuLog?.workflow?.id || contextMenuLog?.workflowId
@@ -542,6 +551,37 @@ export default function Logs() {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [contextMenuLog])
+ const retryLog = useCallback(
+ async (log: WorkflowLog | null) => {
+ const workflowId = log?.workflow?.id || log?.workflowId
+ const logId = log?.id
+ if (!workflowId || !logId) return
+
+ try {
+ const detailLog = await queryClient.fetchQuery({
+ queryKey: logKeys.detail(logId),
+ queryFn: ({ signal }) => fetchLogDetail(logId, signal),
+ staleTime: 30 * 1000,
+ })
+ const input = extractRetryInput(detailLog)
+ await retryExecution.mutateAsync({ workflowId, input })
+ toast.success('Retry started')
+ } catch {
+ toast.error('Failed to retry execution')
+ }
+ },
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ []
+ )
+
+ const handleRetryExecution = useCallback(() => {
+ retryLog(contextMenuLog)
+ }, [contextMenuLog, retryLog])
+
+ const handleRetrySidebarExecution = useCallback(() => {
+ retryLog(selectedLog)
+ }, [selectedLog, retryLog])
+
const contextMenuWorkflowId = contextMenuLog?.workflow?.id || contextMenuLog?.workflowId
const isFilteredByThisWorkflow = Boolean(
contextMenuWorkflowId && workflowIds.length === 1 && workflowIds[0] === contextMenuWorkflowId
@@ -557,7 +597,8 @@ export default function Logs() {
})
useEffect(() => {
- if (!selectedLogId) return
+ if (!selectedLogId || !shouldScrollIntoViewRef.current) return
+ shouldScrollIntoViewRef.current = false
const row = document.querySelector(`[data-row-id="${selectedLogId}"]`) as HTMLElement | null
if (row) {
row.scrollIntoView({ behavior: 'smooth', block: 'nearest' })
@@ -676,6 +717,7 @@ export default function Logs() {
if (currentIndex === -1 && (e.key === 'ArrowUp' || e.key === 'ArrowDown')) {
e.preventDefault()
+ shouldScrollIntoViewRef.current = true
dispatch({ type: 'SELECT_LOG', logId: currentLogs[0].id })
return
}
@@ -780,6 +822,8 @@ export default function Logs() {
onNavigatePrev={handleNavigatePrev}
hasNext={selectedLogIndex < sortedLogs.length - 1}
hasPrev={selectedLogIndex > 0}
+ onRetryExecution={handleRetrySidebarExecution}
+ isRetryPending={retryExecution.isPending}
/>
),
[
@@ -788,6 +832,8 @@ export default function Logs() {
handleCloseSidebar,
handleNavigateNext,
handleNavigatePrev,
+ handleRetrySidebarExecution,
+ retryExecution.isPending,
selectedLogIndex,
sortedLogs.length,
]
@@ -1193,6 +1239,8 @@ export default function Logs() {
onOpenWorkflow={handleOpenWorkflow}
onOpenPreview={handleOpenPreview}
onCancelExecution={handleCancelExecution}
+ onRetryExecution={handleRetryExecution}
+ isRetryPending={retryExecution.isPending}
onToggleWorkflowFilter={handleToggleWorkflowFilter}
onClearAllFilters={handleClearAllFilters}
isFilteredByThisWorkflow={isFilteredByThisWorkflow}
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/utils.ts b/apps/sim/app/workspace/[workspaceId]/logs/utils.ts
index 535ab8000d9..bfca8a90b5a 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/utils.ts
+++ b/apps/sim/app/workspace/[workspaceId]/logs/utils.ts
@@ -4,6 +4,7 @@ import { format } from 'date-fns'
import { Badge } from '@/components/emcn'
import { getIntegrationMetadata } from '@/lib/logs/get-trigger-options'
import { getBlock } from '@/blocks/registry'
+import type { WorkflowLog } from '@/stores/logs/filters/types'
import { CORE_TRIGGER_TYPES } from '@/stores/logs/filters/types'
export const LOG_COLUMNS = {
@@ -442,3 +443,37 @@ export const formatDate = (dateString: string) => {
})(),
}
}
+
+/**
+ * Extracts the original workflow input from a log entry for retry.
+ * Prefers the persisted `workflowInput` field (new logs), falls back to
+ * reconstructing from `executionState.blockStates` (old logs).
+ */
+export function extractRetryInput(log: WorkflowLog): unknown | undefined {
+ const execData = log.executionData as Record | undefined
+ if (!execData) return undefined
+
+ if (execData.workflowInput !== undefined) {
+ return execData.workflowInput
+ }
+
+ const executionState = execData.executionState as
+ | {
+ blockStates?: Record<
+ string,
+ { output?: unknown; executed?: boolean; executionTime?: number }
+ >
+ }
+ | undefined
+ if (!executionState?.blockStates) return undefined
+
+ // Starter/trigger blocks are pre-populated with executed: false and
+ // executionTime: 0, which distinguishes them from blocks that actually ran.
+ for (const state of Object.values(executionState.blockStates)) {
+ if (state.executed === false && state.executionTime === 0 && state.output != null) {
+ return state.output
+ }
+ }
+
+ return undefined
+}
diff --git a/apps/sim/app/workspace/[workspaceId]/settings/components/credentials/credentials-manager.tsx b/apps/sim/app/workspace/[workspaceId]/settings/components/credentials/credentials-manager.tsx
index 42cda8def1c..a9a7b2e29bf 100644
--- a/apps/sim/app/workspace/[workspaceId]/settings/components/credentials/credentials-manager.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/settings/components/credentials/credentials-manager.tsx
@@ -22,6 +22,7 @@ import {
Textarea,
Tooltip,
Trash,
+ toast,
} from '@/components/emcn'
import { Input } from '@/components/ui'
import { useSession } from '@/lib/auth/auth-client'
@@ -60,7 +61,6 @@ const logger = createLogger('SecretsManager')
const GRID_COLS = 'grid grid-cols-[minmax(0,1fr)_8px_minmax(0,1fr)_auto_auto] items-center'
const COL_SPAN_ALL = 'col-span-5'
-const CONFLICT_CLASS = 'border-[var(--text-error)] bg-[var(--error-muted)]'
const ROLE_OPTIONS = [
{ value: 'member', label: 'Member' },
@@ -402,7 +402,6 @@ export function CredentialsManager() {
const isWorkspaceAdmin = workspacePermissions?.viewer?.isAdmin ?? false
const isLoading = isPersonalLoading || isWorkspaceLoading
- const variables = useMemo(() => personalEnvData || {}, [personalEnvData])
const [envVars, setEnvVars] = useState([])
const [newWorkspaceRows, setNewWorkspaceRows] = useState([
@@ -591,7 +590,7 @@ export function CredentialsManager() {
useEffect(() => {
if (hasSavedRef.current) return
- const existingVars = Object.values(variables)
+ const existingVars = Object.values(personalEnvData || {})
const initialVars = [
...existingVars.map((envVar) => ({
...envVar,
@@ -601,7 +600,7 @@ export function CredentialsManager() {
]
initialVarsRef.current = JSON.parse(JSON.stringify(initialVars))
setEnvVars(JSON.parse(JSON.stringify(initialVars)))
- }, [variables])
+ }, [personalEnvData])
useEffect(() => {
if (!workspaceEnvData) return
@@ -1041,11 +1040,15 @@ export function CredentialsManager() {
setWorkspaceVars(mergedWorkspaceVars)
setNewWorkspaceRows([createEmptyEnvVar()])
+ if (mutations.length > 0) {
+ toast.success('Secrets saved')
+ }
} catch (error) {
hasSavedRef.current = false
initialVarsRef.current = prevInitialVars
initialWorkspaceVarsRef.current = prevInitialWorkspaceVars
logger.error('Failed to save environment variables:', error)
+ toast.error('Failed to save secrets')
} finally {
if (mutations.length > 0) {
queryClient.invalidateQueries({ queryKey: workspaceCredentialKeys.lists() })
@@ -1095,7 +1098,7 @@ export function CredentialsManager() {
onFocus={(e) => e.target.removeAttribute('readOnly')}
className={cn(
'h-9',
- isConflict && CONFLICT_CLASS,
+ isConflict && 'border-[var(--text-error)]',
keyError && 'border-[var(--text-error)]'
)}
/>
@@ -1115,8 +1118,6 @@ export function CredentialsManager() {
onBlur={() => setFocusedValueIndex(null)}
onPaste={(e) => handlePaste(e, originalIndex)}
placeholder={isConflict ? 'Workspace override active' : 'Enter value'}
- disabled={isConflict}
- aria-disabled={isConflict}
name={`env_variable_value_${envVar.id || originalIndex}_${Math.random()}`}
autoComplete='off'
autoCapitalize='off'
@@ -1125,12 +1126,11 @@ export function CredentialsManager() {
style={maskedValueStyle}
className={cn(
'h-9',
- !isComplete && 'col-span-2',
- isConflict && 'cursor-not-allowed',
- isConflict && CONFLICT_CLASS
+ (!isComplete || isConflict) && 'col-span-2',
+ isConflict && 'cursor-not-allowed opacity-50'
)}
/>
- {isComplete && (
+ {isComplete && !isConflict && (
handleViewDetails(envVar.key, 'env_personal')}
@@ -1267,7 +1267,7 @@ export function CredentialsManager() {
+
{detailsError}
)}
@@ -1299,7 +1299,7 @@ export function CredentialsManager() {
-
+
{member.userName || member.userEmail || member.userId}
diff --git a/apps/sim/app/workspace/[workspaceId]/settings/components/integrations/integrations-manager.tsx b/apps/sim/app/workspace/[workspaceId]/settings/components/integrations/integrations-manager.tsx
index 5fbe4188648..998709cd615 100644
--- a/apps/sim/app/workspace/[workspaceId]/settings/components/integrations/integrations-manager.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/settings/components/integrations/integrations-manager.tsx
@@ -1288,7 +1288,7 @@ export function IntegrationsManager() {
{detailsError && (
-
+
{detailsError}
)}
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/messages-input/messages-input.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/messages-input/messages-input.tsx
index 361a85582d3..55a03aacf56 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/messages-input/messages-input.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/messages-input/messages-input.tsx
@@ -573,6 +573,7 @@ export function MessagesInput({
setOpenPopoverIndex(open ? index : null)}
+ colorScheme='inverted'
>
setUsageControlPopoverIndex(open ? toolIndex : null)}
+ colorScheme='inverted'
>
@@ -663,7 +661,7 @@ function SubflowConfigDisplay({ block, loop, parallel }: SubflowConfigDisplayPro
disabled
className='mb-1'
/>
-
+
Enter a number between 1 and {config.maxIterations}
@@ -1091,7 +1089,7 @@ function PreviewEditorContent({
const subflowName = block.name || (isLoop ? 'Loop' : 'Parallel')
return (
-
+
{/* Header - styled like subflow header */}
+
@@ -1180,7 +1178,7 @@ function PreviewEditorContent({
: 'gray'
return (
-
+
{/* Header - styled like editor */}
{block.type !== 'note' && (
@@ -1188,10 +1186,7 @@ function PreviewEditorContent({
className='flex h-[18px] w-[18px] flex-shrink-0 items-center justify-center rounded-sm'
style={{ backgroundColor: blockConfig.bgColor }}
>
-
+
)}
@@ -1394,7 +1389,7 @@ function PreviewEditorContent({
className='h-[18px] w-[18px] animate-spin rounded-full'
style={{
background:
- 'conic-gradient(from 0deg, hsl(var(--muted-foreground)) 0deg 120deg, transparent 120deg 180deg, hsl(var(--muted-foreground)) 180deg 300deg, transparent 300deg 360deg)',
+ 'conic-gradient(from 0deg, var(--text-tertiary) 0deg 120deg, transparent 120deg 180deg, var(--text-tertiary) 180deg 300deg, transparent 300deg 360deg)',
mask: 'radial-gradient(farthest-side, transparent calc(100% - 1.5px), black calc(100% - 1.5px))',
WebkitMask:
'radial-gradient(farthest-side, transparent calc(100% - 1.5px), black calc(100% - 1.5px))',
diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/preview/preview.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/preview/preview.tsx
index ac19ec9d1d1..4d3e7fd45f5 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/components/preview/preview.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/w/components/preview/preview.tsx
@@ -1,6 +1,7 @@
'use client'
-import { useCallback, useMemo, useState } from 'react'
+import type React from 'react'
+import { useEffect, useMemo, useRef, useState } from 'react'
import { ArrowLeft } from 'lucide-react'
import { Button, Tooltip } from '@/components/emcn'
import { redactApiKeys } from '@/lib/core/security/redaction'
@@ -126,8 +127,14 @@ interface PreviewProps {
initialSelectedBlockId?: string | null
/** Whether to auto-select the leftmost block on mount */
autoSelectLeftmost?: boolean
+ /** Whether to show the close (X) button on the block detail panel */
+ showBlockCloseButton?: boolean
}
+const MIN_PANEL_WIDTH = 280
+const MAX_PANEL_WIDTH = 600
+const DEFAULT_PANEL_WIDTH = 320
+
/**
* Main preview component that combines PreviewCanvas with PreviewEditor
* and handles nested workflow navigation via a stack.
@@ -151,7 +158,47 @@ export function Preview({
showBorder = false,
initialSelectedBlockId,
autoSelectLeftmost = true,
+ showBlockCloseButton = true,
}: PreviewProps) {
+ const [panelWidth, setPanelWidth] = useState(DEFAULT_PANEL_WIDTH)
+ const panelWidthRef = useRef(DEFAULT_PANEL_WIDTH)
+ panelWidthRef.current = panelWidth
+ const isResizingRef = useRef(false)
+ const startXRef = useRef(0)
+ const startWidthRef = useRef(0)
+
+ function handleResizeMouseDown(e: React.MouseEvent) {
+ isResizingRef.current = true
+ startXRef.current = e.clientX
+ startWidthRef.current = panelWidthRef.current
+ document.body.style.cursor = 'ew-resize'
+ document.body.style.userSelect = 'none'
+ }
+
+ useEffect(() => {
+ const handleMouseMove = (e: MouseEvent) => {
+ if (!isResizingRef.current) return
+ const delta = startXRef.current - e.clientX
+ setPanelWidth(
+ Math.max(MIN_PANEL_WIDTH, Math.min(MAX_PANEL_WIDTH, startWidthRef.current + delta))
+ )
+ }
+ const handleMouseUp = () => {
+ if (!isResizingRef.current) return
+ isResizingRef.current = false
+ document.body.style.cursor = ''
+ document.body.style.userSelect = ''
+ }
+ document.addEventListener('mousemove', handleMouseMove)
+ document.addEventListener('mouseup', handleMouseUp)
+ return () => {
+ document.removeEventListener('mousemove', handleMouseMove)
+ document.removeEventListener('mouseup', handleMouseUp)
+ document.body.style.cursor = ''
+ document.body.style.userSelect = ''
+ }
+ }, [])
+
const [pinnedBlockId, setPinnedBlockId] = useState(() => {
if (initialSelectedBlockId) return initialSelectedBlockId
if (autoSelectLeftmost) {
@@ -173,67 +220,55 @@ export function Preview({
return buildBlockExecutions(rootTraceSpans)
}, [providedBlockExecutions, rootTraceSpans])
- const blockExecutions = useMemo(() => {
- if (workflowStack.length > 0) {
- return workflowStack[workflowStack.length - 1].blockExecutions
- }
- return rootBlockExecutions
- }, [workflowStack, rootBlockExecutions])
-
- const workflowState = useMemo(() => {
- if (workflowStack.length > 0) {
- return workflowStack[workflowStack.length - 1].workflowState
- }
- return rootWorkflowState
- }, [workflowStack, rootWorkflowState])
-
- const isExecutionMode = useMemo(() => {
- return Object.keys(blockExecutions).length > 0
- }, [blockExecutions])
-
- const handleDrillDown = useCallback(
- (blockId: string, childWorkflowState: WorkflowState) => {
- const blockExecution = blockExecutions[blockId]
- const childTraceSpans = extractChildTraceSpans(blockExecution)
- const childBlockExecutions = buildBlockExecutions(childTraceSpans)
-
- const workflowName =
- childWorkflowState.metadata?.name ||
- (blockExecution?.output as { childWorkflowName?: string } | undefined)?.childWorkflowName ||
- 'Nested Workflow'
-
- setWorkflowStack((prev) => [
- ...prev,
- {
- workflowState: childWorkflowState,
- traceSpans: childTraceSpans,
- blockExecutions: childBlockExecutions,
- workflowName,
- },
- ])
-
- const leftmostId = getLeftmostBlockId(childWorkflowState)
- setPinnedBlockId(leftmostId)
- },
- [blockExecutions]
- )
+ const currentStackEntry =
+ workflowStack.length > 0 ? workflowStack[workflowStack.length - 1] : null
+ const blockExecutions = currentStackEntry
+ ? currentStackEntry.blockExecutions
+ : rootBlockExecutions
+ const workflowState = currentStackEntry ? currentStackEntry.workflowState : rootWorkflowState
+
+ const isExecutionMode = Object.keys(blockExecutions).length > 0
+
+ function handleDrillDown(blockId: string, childWorkflowState: WorkflowState) {
+ const blockExecution = blockExecutions[blockId]
+ const childTraceSpans = extractChildTraceSpans(blockExecution)
+ const childBlockExecutions = buildBlockExecutions(childTraceSpans)
+
+ const workflowName =
+ childWorkflowState.metadata?.name ||
+ (blockExecution?.output as { childWorkflowName?: string } | undefined)?.childWorkflowName ||
+ 'Nested Workflow'
+
+ setWorkflowStack((prev) => [
+ ...prev,
+ {
+ workflowState: childWorkflowState,
+ traceSpans: childTraceSpans,
+ blockExecutions: childBlockExecutions,
+ workflowName,
+ },
+ ])
+
+ const leftmostId = getLeftmostBlockId(childWorkflowState)
+ setPinnedBlockId(leftmostId)
+ }
- const handleGoBack = useCallback(() => {
+ function handleGoBack() {
setWorkflowStack((prev) => prev.slice(0, -1))
setPinnedBlockId(null)
- }, [])
+ }
- const handleNodeClick = useCallback((blockId: string) => {
+ function handleNodeClick(blockId: string) {
setPinnedBlockId(blockId)
- }, [])
+ }
- const handlePaneClick = useCallback(() => {
+ function handlePaneClick() {
setPinnedBlockId(null)
- }, [])
+ }
- const handleEditorClose = useCallback(() => {
+ function handleEditorClose() {
setPinnedBlockId(null)
- }, [])
+ }
const isNested = workflowStack.length > 0
@@ -289,19 +324,26 @@ export function Preview({
{pinnedBlockId && workflowState.blocks[pinnedBlockId] && (
-
+
+ {/* Left-edge resize handle */}
+
+
+
)}
)
diff --git a/apps/sim/components/emcn/components/input/input.tsx b/apps/sim/components/emcn/components/input/input.tsx
index 662dc320dac..b216d67bb52 100644
--- a/apps/sim/components/emcn/components/input/input.tsx
+++ b/apps/sim/components/emcn/components/input/input.tsx
@@ -26,7 +26,7 @@ import { cn } from '@/lib/core/utils/cn'
* Currently supports a 'default' variant.
*/
const inputVariants = cva(
- 'flex w-full touch-manipulation rounded-sm border border-[var(--border-1)] bg-[var(--surface-5)] px-2 py-1.5 font-medium font-sans text-sm text-[var(--text-primary)] transition-colors placeholder:text-[var(--text-muted)] outline-none disabled:cursor-not-allowed disabled:opacity-50',
+ 'flex w-full touch-manipulation rounded-sm border border-[var(--border-1)] bg-[var(--surface-5)] px-2 py-1.5 font-medium font-sans text-sm text-[var(--text-primary)] transition-colors placeholder:text-[var(--text-muted)] outline-none disabled:cursor-not-allowed disabled:opacity-50 scroll-pr-1',
{
variants: {
variant: {
diff --git a/apps/sim/components/emcn/components/modal/modal.tsx b/apps/sim/components/emcn/components/modal/modal.tsx
index dcca3f3cb69..a04b5009eb6 100644
--- a/apps/sim/components/emcn/components/modal/modal.tsx
+++ b/apps/sim/components/emcn/components/modal/modal.tsx
@@ -157,11 +157,11 @@ const ModalContent = React.forwardRef<
= {}
const nodeMetadata = {
@@ -168,10 +173,11 @@ export class BlockExecutor {
})) as NormalizedBlockOutput
}
+ const endedAt = new Date().toISOString()
const duration = performance.now() - startTime
if (blockLog) {
- blockLog.endedAt = new Date().toISOString()
+ blockLog.endedAt = endedAt
blockLog.durationMs = duration
blockLog.success = true
blockLog.output = filterOutputForLog(block.metadata?.id || '', normalizedOutput, { block })
@@ -190,7 +196,7 @@ export class BlockExecutor {
const displayOutput = filterOutputForLog(block.metadata?.id || '', normalizedOutput, {
block,
})
- await this.callOnBlockComplete(
+ this.fireBlockCompleteCallback(
ctx,
node,
block,
@@ -248,6 +254,7 @@ export class BlockExecutor {
isSentinel: boolean,
phase: 'input_resolution' | 'execution'
): Promise {
+ const endedAt = new Date().toISOString()
const duration = performance.now() - startTime
const errorMessage = normalizeError(error)
const hasResolvedInputs =
@@ -272,7 +279,7 @@ export class BlockExecutor {
this.state.setBlockOutput(node.id, errorOutput, duration)
if (blockLog) {
- blockLog.endedAt = new Date().toISOString()
+ blockLog.endedAt = endedAt
blockLog.durationMs = duration
blockLog.success = false
blockLog.error = errorMessage
@@ -298,7 +305,7 @@ export class BlockExecutor {
? error.childWorkflowInstanceId
: undefined
const displayOutput = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
- await this.callOnBlockComplete(
+ this.fireBlockCompleteCallback(
ctx,
node,
block,
@@ -350,7 +357,8 @@ export class BlockExecutor {
ctx: ExecutionContext,
blockId: string,
block: SerializedBlock,
- node: DAGNode
+ node: DAGNode,
+ startedAt: string
): BlockLog {
let blockName = block.metadata?.name ?? blockId
let loopId: string | undefined
@@ -383,7 +391,7 @@ export class BlockExecutor {
blockId,
blockName,
blockType: block.metadata?.id ?? DEFAULTS.BLOCK_TYPE,
- startedAt: new Date().toISOString(),
+ startedAt,
executionOrder: getNextExecutionOrder(ctx),
endedAt: '',
durationMs: 0,
@@ -450,39 +458,47 @@ export class BlockExecutor {
return redactApiKeys(result)
}
- private async callOnBlockStart(
+ /**
+ * Fires the `onBlockStart` progress callback without blocking block execution.
+ * Any error is logged and swallowed so callback I/O never stalls the critical path.
+ */
+ private fireBlockStartCallback(
ctx: ExecutionContext,
node: DAGNode,
block: SerializedBlock,
executionOrder: number
- ): Promise {
+ ): void {
+ if (!this.contextExtensions.onBlockStart) return
+
const blockId = node.metadata?.originalBlockId ?? node.id
const blockName = block.metadata?.name ?? blockId
const blockType = block.metadata?.id ?? DEFAULTS.BLOCK_TYPE
-
const iterationContext = getIterationContext(ctx, node?.metadata)
- if (this.contextExtensions.onBlockStart) {
- try {
- await this.contextExtensions.onBlockStart(
- blockId,
- blockName,
- blockType,
- executionOrder,
- iterationContext,
- ctx.childWorkflowContext
- )
- } catch (error) {
+ void this.contextExtensions
+ .onBlockStart(
+ blockId,
+ blockName,
+ blockType,
+ executionOrder,
+ iterationContext,
+ ctx.childWorkflowContext
+ )
+ .catch((error) => {
this.execLogger.warn('Block start callback failed', {
blockId,
blockType,
error: toError(error).message,
})
- }
- }
+ })
}
- private async callOnBlockComplete(
+ /**
+ * Fires the `onBlockComplete` progress callback without blocking subsequent blocks.
+ * The callback typically performs DB writes for progress markers — awaiting it would
+ * add latency between blocks and skew wall-clock timing in the trace view.
+ */
+ private fireBlockCompleteCallback(
ctx: ExecutionContext,
node: DAGNode,
block: SerializedBlock,
@@ -493,39 +509,38 @@ export class BlockExecutor {
executionOrder: number,
endedAt: string,
childWorkflowInstanceId?: string
- ): Promise {
+ ): void {
+ if (!this.contextExtensions.onBlockComplete) return
+
const blockId = node.metadata?.originalBlockId ?? node.id
const blockName = block.metadata?.name ?? blockId
const blockType = block.metadata?.id ?? DEFAULTS.BLOCK_TYPE
-
const iterationContext = getIterationContext(ctx, node?.metadata)
- if (this.contextExtensions.onBlockComplete) {
- try {
- await this.contextExtensions.onBlockComplete(
- blockId,
- blockName,
- blockType,
- {
- input,
- output,
- executionTime: duration,
- startedAt,
- executionOrder,
- endedAt,
- childWorkflowInstanceId,
- },
- iterationContext,
- ctx.childWorkflowContext
- )
- } catch (error) {
+ void this.contextExtensions
+ .onBlockComplete(
+ blockId,
+ blockName,
+ blockType,
+ {
+ input,
+ output,
+ executionTime: duration,
+ startedAt,
+ executionOrder,
+ endedAt,
+ childWorkflowInstanceId,
+ },
+ iterationContext,
+ ctx.childWorkflowContext
+ )
+ .catch((error) => {
this.execLogger.warn('Block completion callback failed', {
blockId,
blockType,
error: toError(error).message,
})
- }
- }
+ })
}
private preparePauseResumeSelfReference(
diff --git a/apps/sim/executor/types.ts b/apps/sim/executor/types.ts
index 00caff1d9ef..d3133a238bd 100644
--- a/apps/sim/executor/types.ts
+++ b/apps/sim/executor/types.ts
@@ -74,19 +74,118 @@ export interface SerializedSnapshot {
triggerIds: string[]
}
+/**
+ * Identifies a tool call emitted by a model iteration. Matches the
+ * `tool_call.id` convention used by OpenAI, Anthropic, and the OTel GenAI
+ * spec so tool segments can be correlated back to the iteration that issued
+ * them.
+ */
+export interface IterationToolCall {
+ id: string
+ name: string
+ arguments: Record<string, unknown> | string
+}
+
+/**
+ * A single phase of provider execution (model call or tool invocation).
+ *
+ * Providers emit these per iteration. Model segments carry the assistant's
+ * output for that iteration (text, thinking, tool_calls, tokens, finish
+ * reason) so the trace reveals *why* each tool was invoked — not just that
+ * it was. All content fields are optional; providers fill in what they have.
+ */
+export interface ProviderTimingSegment {
+ type: 'model' | 'tool'
+ name?: string
+ startTime: number
+ endTime: number
+ duration: number
+ assistantContent?: string
+ thinkingContent?: string
+ toolCalls?: IterationToolCall[]
+ toolCallId?: string
+ finishReason?: string
+ tokens?: BlockTokens
+ /** Cost for this segment in USD, derived from tokens + model pricing. */
+ cost?: { input?: number; output?: number; total?: number }
+ /** Time-to-first-token in ms (streaming only; first segment typically). */
+ ttft?: number
+ /** Provider system identifier (anthropic, openai, gemini, etc.) — `gen_ai.system`. */
+ provider?: string
+ /** Structured error class (e.g. `rate_limit`, `context_length`). */
+ errorType?: string
+ /** Human-readable error message when this segment failed. */
+ errorMessage?: string
+}
+
+/** Timing info reported by an LLM provider for a single block execution. */
+export interface BlockProviderTiming {
+ startTime: string
+ endTime: string
+ duration: number
+ modelTime?: number
+ toolsTime?: number
+ firstResponseTime?: number
+ iterations?: number
+ timeSegments?: ProviderTimingSegment[]
+}
+
+/** Cost breakdown from provider usage. */
+export interface BlockCost {
+ input: number
+ output: number
+ total: number
+ toolCost?: number
+ pricing?: {
+ input: number
+ output: number
+ cachedInput?: number
+ updatedAt: string
+ }
+}
+
+/** Token usage from provider. `prompt`/`completion` are legacy aliases. */
+export interface BlockTokens {
+ input?: number
+ output?: number
+ total?: number
+ prompt?: number
+ completion?: number
+ /** Input tokens served from the provider's prompt cache. */
+ cacheRead?: number
+ /** Input tokens newly written to the provider's prompt cache. */
+ cacheWrite?: number
+ /** Output tokens consumed by reasoning/thinking (o-series, Claude, Gemini). */
+ reasoning?: number
+}
+
+/** A single tool invocation recorded by an agent-type block. */
+export interface BlockToolCall {
+ name: string
+ duration?: number
+ startTime?: string
+ endTime?: string
+ error?: string
+ arguments?: Record<string, unknown>
+ input?: Record<string, unknown>
+ result?: Record<string, unknown>
+ output?: Record<string, unknown>
+}
+
+/** Normalized tool-call container emitted by providers. */
+export interface BlockToolCalls {
+ list: BlockToolCall[]
+ count: number
+}
+
export interface NormalizedBlockOutput {
[key: string]: any
content?: string
model?: string
- tokens?: {
- input?: number
- output?: number
- total?: number
- }
- toolCalls?: {
- list: any[]
- count: number
- }
+ tokens?: BlockTokens
+ toolCalls?: BlockToolCalls
+ providerTiming?: BlockProviderTiming
+ cost?: BlockCost
files?: UserFile[]
selectedPath?: {
blockId: string
@@ -115,8 +214,8 @@ export interface BlockLog {
endedAt: string
durationMs: number
success: boolean
- output?: any
- input?: any
+ output?: NormalizedBlockOutput
+ input?: Record<string, unknown>
error?: string
/** Whether this error was handled by an error handler path (error port) */
errorHandled?: boolean
diff --git a/apps/sim/hooks/queries/logs.ts b/apps/sim/hooks/queries/logs.ts
index bbbcea5ba7c..15beff689e7 100644
--- a/apps/sim/hooks/queries/logs.ts
+++ b/apps/sim/hooks/queries/logs.ts
@@ -25,9 +25,9 @@ export const logKeys = {
[...logKeys.lists(), workspaceId ?? '', filters] as const,
details: () => [...logKeys.all, 'detail'] as const,
detail: (logId: string | undefined) => [...logKeys.details(), logId ?? ''] as const,
- statsAll: () => [...logKeys.all, 'stats'] as const,
- stats: (workspaceId: string | undefined, filters: object) =>
- [...logKeys.statsAll(), workspaceId ?? '', filters] as const,
+ stats: () => [...logKeys.all, 'stats'] as const,
+ stat: (workspaceId: string | undefined, filters: object) =>
+ [...logKeys.stats(), workspaceId ?? '', filters] as const,
executionSnapshots: () => [...logKeys.all, 'executionSnapshot'] as const,
executionSnapshot: (executionId: string | undefined) =>
[...logKeys.executionSnapshots(), executionId ?? ''] as const,
@@ -121,7 +121,7 @@ async function fetchLogsPage(
}
}
-async function fetchLogDetail(logId: string, signal?: AbortSignal): Promise {
+export async function fetchLogDetail(logId: string, signal?: AbortSignal): Promise {
const response = await fetch(`/api/logs/${logId}`, { signal })
if (!response.ok) {
@@ -223,7 +223,7 @@ export function useDashboardStats(
options?: UseDashboardStatsOptions
) {
return useQuery({
- queryKey: logKeys.stats(workspaceId, filters),
+ queryKey: logKeys.stat(workspaceId, filters),
queryFn: ({ signal }) => fetchDashboardStats(workspaceId as string, filters, signal),
enabled: Boolean(workspaceId) && (options?.enabled ?? true),
refetchInterval: options?.refetchInterval ?? false,
@@ -328,7 +328,38 @@ export function useCancelExecution() {
onSettled: () => {
queryClient.invalidateQueries({ queryKey: logKeys.lists() })
queryClient.invalidateQueries({ queryKey: logKeys.details() })
- queryClient.invalidateQueries({ queryKey: logKeys.statsAll() })
+ queryClient.invalidateQueries({ queryKey: logKeys.stats() })
+ },
+ })
+}
+
+export function useRetryExecution() {
+ const queryClient = useQueryClient()
+ return useMutation({
+ mutationFn: async ({ workflowId, input }: { workflowId: string; input?: unknown }) => {
+ const res = await fetch(`/api/workflows/${workflowId}/execute`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ input, triggerType: 'manual', stream: true }),
+ })
+ if (!res.ok) {
+ const data = await res.json().catch(() => ({}))
+ throw new Error(data.error || 'Failed to retry execution')
+ }
+ // The ReadableStream is lazy — start() only runs when read.
+ // Read one chunk to trigger execution, then cancel. Execution continues
+ // server-side after client disconnect.
+ const reader = res.body?.getReader()
+ if (reader) {
+ await reader.read()
+ reader.cancel()
+ }
+ return { started: true }
+ },
+ onSettled: () => {
+ queryClient.invalidateQueries({ queryKey: logKeys.lists() })
+ queryClient.invalidateQueries({ queryKey: logKeys.details() })
+ queryClient.invalidateQueries({ queryKey: logKeys.stats() })
},
})
}
diff --git a/apps/sim/lib/copilot/tools/server/jobs/get-job-logs.ts b/apps/sim/lib/copilot/tools/server/jobs/get-job-logs.ts
index 90ed8fbbe51..af11c333c0d 100644
--- a/apps/sim/lib/copilot/tools/server/jobs/get-job-logs.ts
+++ b/apps/sim/lib/copilot/tools/server/jobs/get-job-logs.ts
@@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { GetJobLogs } from '@/lib/copilot/generated/tool-catalog-v1'
import type { BaseServerTool, ServerToolContext } from '@/lib/copilot/tools/server/base-tool'
+import type { TraceSpan } from '@/lib/logs/types'
import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('GetJobLogsServerTool')
@@ -38,29 +39,68 @@ interface JobLogEntry {
tokens?: unknown
}
-function extractToolCalls(traceSpan: any): ToolCallDetail[] {
- if (!traceSpan?.toolCalls || !Array.isArray(traceSpan.toolCalls)) return []
+/**
+ * Walks the trace-span tree and collects tool invocations from both data shapes:
+ * - New: `type: 'tool'` spans nested under agent blocks in `children`.
+ * - Legacy: a `toolCalls` array hanging off the agent span directly (pre-unification).
+ */
+function collectToolCalls(spans: TraceSpan[] | undefined): ToolCallDetail[] {
+ if (!spans?.length) return []
+ const collected: ToolCallDetail[] = []
+
+ const visit = (span: TraceSpan) => {
+ if (span.type === 'tool') {
+ const output = span.output as { result?: unknown } | undefined
+ collected.push({
+ name: span.name || 'unknown',
+ input: span.input ?? {},
+ output: output?.result ?? span.output,
+ error: span.status === 'error' ? errorMessageFromSpan(span) : undefined,
+ duration: span.duration || 0,
+ })
+ return
+ }
+
+ if (span.toolCalls?.length) {
+ for (const tc of span.toolCalls) {
+ collected.push({
+ name: tc.name || 'unknown',
+ input: tc.input ?? {},
+ output: tc.output ?? undefined,
+ error: tc.error || undefined,
+ duration: tc.duration || 0,
+ })
+ }
+ }
+
+ if (span.children?.length) {
+ for (const child of span.children) visit(child)
+ }
+ }
+
+ for (const span of spans) visit(span)
+ return collected
+}
- return traceSpan.toolCalls.map((tc: any) => ({
- name: tc.name || 'unknown',
- input: tc.input || tc.arguments || {},
- output: tc.output || tc.result || undefined,
- error: tc.error || undefined,
- duration: tc.duration || 0,
- }))
+function errorMessageFromSpan(span: TraceSpan): string | undefined {
+ const out = span.output as { error?: unknown } | undefined
+ if (typeof out?.error === 'string') return out.error
+ return undefined
}
-function extractOutputAndError(executionData: any): {
+function extractOutputAndError(
+ executionData: { traceSpans?: TraceSpan[] } & Record<string, unknown>
+): {
output: unknown
error: string | undefined
toolCalls: ToolCallDetail[]
cost: unknown
tokens: unknown
} {
- const traceSpans = executionData?.traceSpans || []
+ const traceSpans = executionData?.traceSpans ?? []
const mainSpan = traceSpans[0]
- const toolCalls = mainSpan ? extractToolCalls(mainSpan) : []
+ const toolCalls = collectToolCalls(traceSpans)
const output = mainSpan?.output || executionData?.finalOutput || undefined
const cost = mainSpan?.cost || executionData?.cost || undefined
const tokens = mainSpan?.tokens || undefined
diff --git a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-logs.ts b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-logs.ts
index 3ab0cc2d573..0daf2aa07d0 100644
--- a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-logs.ts
+++ b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-logs.ts
@@ -5,7 +5,7 @@ import { authorizeWorkflowByWorkspacePermission } from '@sim/workflow-authz'
import { and, desc, eq } from 'drizzle-orm'
import { GetWorkflowLogs } from '@/lib/copilot/generated/tool-catalog-v1'
import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool'
-import type { TraceSpan } from '@/stores/logs/filters/types'
+import type { TraceSpan } from '@/lib/logs/types'
const logger = createLogger('GetWorkflowLogsServerTool')
diff --git a/apps/sim/lib/core/telemetry.ts b/apps/sim/lib/core/telemetry.ts
index 016de4b7614..8d3007744e8 100644
--- a/apps/sim/lib/core/telemetry.ts
+++ b/apps/sim/lib/core/telemetry.ts
@@ -100,6 +100,9 @@ const BLOCK_TYPE_MAPPING: Record<
}
if (span.tokens) {
+ // `TraceSpan.tokens` is typed as an object, but older persisted logs
+ // stored it as a bare number (total). Keep the numeric branch for those
+ // legacy rows.
if (typeof span.tokens === 'number') {
attrs[GenAIAttributes.USAGE_TOTAL_TOKENS] = span.tokens
} else {
diff --git a/apps/sim/lib/logs/execution/logger.ts b/apps/sim/lib/logs/execution/logger.ts
index d538ab738ad..07b7af219bb 100644
--- a/apps/sim/lib/logs/execution/logger.ts
+++ b/apps/sim/lib/logs/execution/logger.ts
@@ -47,17 +47,6 @@ const TRIGGER_COUNTER_MAP: Record = {
a2a: { key: 'totalA2aExecutions', column: 'total_a2a_executions' },
} as const
-export interface ToolCall {
- name: string
- duration: number // in milliseconds
- startTime: string // ISO timestamp
- endTime: string // ISO timestamp
- status: 'success' | 'error'
- input?: Record<string, unknown>
- output?: Record<string, unknown>
- error?: string
-}
-
const logger = createLogger('ExecutionLogger')
function countTraceSpans(traceSpans?: TraceSpan[]): number {
@@ -84,6 +73,7 @@ export class ExecutionLogger implements IExecutionLoggerService {
models: NonNullable
}
executionState?: SerializableExecutionState
+ workflowInput?: unknown
}): WorkflowExecutionLog['executionData'] {
const {
existingExecutionData,
@@ -93,6 +83,7 @@ export class ExecutionLogger implements IExecutionLoggerService {
completionFailure,
executionCost,
executionState,
+ workflowInput,
} = params
const traceSpanCount = countTraceSpans(traceSpans)
@@ -128,6 +119,7 @@ export class ExecutionLogger implements IExecutionLoggerService {
},
models: executionCost.models,
...(executionState ? { executionState } : {}),
+ ...(workflowInput !== undefined ? { workflowInput } : {}),
}
}
@@ -376,6 +368,7 @@ export class ExecutionLogger implements IExecutionLoggerService {
completionFailure,
executionCost,
executionState,
+ workflowInput,
})
const [updatedLog] = await db
diff --git a/apps/sim/lib/logs/execution/trace-spans/iteration-grouping.ts b/apps/sim/lib/logs/execution/trace-spans/iteration-grouping.ts
new file mode 100644
index 00000000000..1515ef7c42e
--- /dev/null
+++ b/apps/sim/lib/logs/execution/trace-spans/iteration-grouping.ts
@@ -0,0 +1,323 @@
+import { createLogger } from '@sim/logger'
+import type { TraceSpan } from '@/lib/logs/types'
+import { stripCloneSuffixes } from '@/executor/utils/subflow-utils'
+
+const logger = createLogger('IterationGrouping')
+
+/** Counter state for generating sequential container names. */
+interface ContainerNameCounters {
+ loopNumbers: Map
+ parallelNumbers: Map
+ loopCounter: number
+ parallelCounter: number
+}
+
+/**
+ * Builds a container-level TraceSpan (iteration wrapper or top-level container)
+ * from its source spans and resolved children.
+ */
+function buildContainerSpan(opts: {
+ id: string
+ name: string
+ type: string
+ sourceSpans: TraceSpan[]
+ children: TraceSpan[]
+}): TraceSpan {
+ const startTimes = opts.sourceSpans.map((s) => new Date(s.startTime).getTime())
+ const endTimes = opts.sourceSpans.map((s) => new Date(s.endTime).getTime())
+
+ // Guard against empty sourceSpans — Math.min/max of empty array returns ±Infinity
+ // which produces NaN durations and invalid Dates downstream.
+ const nowMs = Date.now()
+ const earliestStart = startTimes.length > 0 ? Math.min(...startTimes) : nowMs
+ const latestEnd = endTimes.length > 0 ? Math.max(...endTimes) : nowMs
+
+ const hasErrors = opts.sourceSpans.some((s) => s.status === 'error')
+ const allErrorsHandled =
+ hasErrors && opts.children.every((s) => s.status !== 'error' || s.errorHandled)
+
+ return {
+ id: opts.id,
+ name: opts.name,
+ type: opts.type,
+ duration: Math.max(0, latestEnd - earliestStart),
+ startTime: new Date(earliestStart).toISOString(),
+ endTime: new Date(latestEnd).toISOString(),
+ status: hasErrors ? 'error' : 'success',
+ ...(allErrorsHandled && { errorHandled: true }),
+ children: opts.children,
+ }
+}
+
+/**
+ * Resolves a container name from normal (non-iteration) spans or assigns a sequential number.
+ * Strips clone suffixes so all clones of the same container share one name/number.
+ */
+function resolveContainerName(
+ containerId: string,
+ containerType: 'parallel' | 'loop',
+ normalSpans: TraceSpan[],
+ counters: ContainerNameCounters
+): string {
+ const originalId = stripCloneSuffixes(containerId)
+
+ const matchingBlock = normalSpans.find(
+ (s) => s.blockId === originalId && s.type === containerType
+ )
+ if (matchingBlock?.name) return matchingBlock.name
+
+ if (containerType === 'parallel') {
+ if (!counters.parallelNumbers.has(originalId)) {
+ counters.parallelNumbers.set(originalId, counters.parallelCounter++)
+ }
+ return `Parallel ${counters.parallelNumbers.get(originalId)}`
+ }
+ if (!counters.loopNumbers.has(originalId)) {
+ counters.loopNumbers.set(originalId, counters.loopCounter++)
+ }
+ return `Loop ${counters.loopNumbers.get(originalId)}`
+}
+
+/**
+ * Classifies a span's immediate container ID and type from its metadata.
+ * Returns undefined for non-iteration spans.
+ */
+function classifySpanContainer(
+ span: TraceSpan
+): { containerId: string; containerType: 'parallel' | 'loop' } | undefined {
+ if (span.parallelId) {
+ return { containerId: span.parallelId, containerType: 'parallel' }
+ }
+ if (span.loopId) {
+ return { containerId: span.loopId, containerType: 'loop' }
+ }
+ if (span.blockId?.includes('_parallel_')) {
+ const match = span.blockId.match(/_parallel_([^_]+)_iteration_/)
+ if (match) {
+ return { containerId: match[1], containerType: 'parallel' }
+ }
+ }
+ return undefined
+}
+
+/**
+ * Finds the outermost container for a span. For nested spans, this is parentIterations[0].
+ * For flat spans, this is the span's own immediate container.
+ */
+function getOutermostContainer(
+ span: TraceSpan
+): { containerId: string; containerType: 'parallel' | 'loop' } | undefined {
+ if (span.parentIterations && span.parentIterations.length > 0) {
+ const outermost = span.parentIterations[0]
+ return {
+ containerId: outermost.iterationContainerId,
+ containerType: outermost.iterationType as 'parallel' | 'loop',
+ }
+ }
+ return classifySpanContainer(span)
+}
+
+/**
+ * Builds the iteration-level hierarchy for a container, recursively nesting
+ * any deeper subflows. Works with both:
+ * - Direct spans (spans whose immediate container matches)
+ * - Nested spans (spans with parentIterations pointing through this container)
+ */
+function buildContainerChildren(
+ containerType: 'parallel' | 'loop',
+ containerId: string,
+ spans: TraceSpan[],
+ normalSpans: TraceSpan[],
+ counters: ContainerNameCounters
+): TraceSpan[] {
+ const iterationType = containerType === 'parallel' ? 'parallel-iteration' : 'loop-iteration'
+
+ const iterationGroups = new Map<number, TraceSpan[]>()
+
+ for (const span of spans) {
+ let iterIdx: number | undefined
+
+ if (
+ span.parentIterations &&
+ span.parentIterations.length > 0 &&
+ span.parentIterations[0].iterationContainerId === containerId
+ ) {
+ iterIdx = span.parentIterations[0].iterationCurrent
+ } else {
+ iterIdx = span.iterationIndex
+ }
+
+ if (iterIdx === undefined) {
+ logger.warn('Skipping iteration span without iterationIndex', {
+ spanId: span.id,
+ blockId: span.blockId,
+ containerId,
+ })
+ continue
+ }
+
+ if (!iterationGroups.has(iterIdx)) iterationGroups.set(iterIdx, [])
+ iterationGroups.get(iterIdx)!.push(span)
+ }
+
+ const iterationChildren: TraceSpan[] = []
+ const sortedIterations = Array.from(iterationGroups.entries()).sort(([a], [b]) => a - b)
+
+ for (const [iterationIndex, iterSpans] of sortedIterations) {
+ const directLeaves: TraceSpan[] = []
+ const deeperSpans: TraceSpan[] = []
+
+ for (const span of iterSpans) {
+ if (
+ span.parentIterations &&
+ span.parentIterations.length > 0 &&
+ span.parentIterations[0].iterationContainerId === containerId
+ ) {
+ deeperSpans.push({
+ ...span,
+ parentIterations: span.parentIterations.slice(1),
+ })
+ } else {
+ directLeaves.push({
+ ...span,
+ name: span.name.replace(/ \(iteration \d+\)$/, ''),
+ })
+ }
+ }
+
+ const nestedResult = groupIterationBlocksRecursive(
+ [...directLeaves, ...deeperSpans],
+ normalSpans,
+ counters
+ )
+
+ iterationChildren.push(
+ buildContainerSpan({
+ id: `${containerId}-iteration-${iterationIndex}`,
+ name: `Iteration ${iterationIndex}`,
+ type: iterationType,
+ sourceSpans: iterSpans,
+ children: nestedResult,
+ })
+ )
+ }
+
+ return iterationChildren
+}
+
+/**
+ * Core recursive algorithm for grouping iteration blocks.
+ *
+ * Handles two cases:
+ * 1. **Flat** (backward compat): spans have loopId/parallelId + iterationIndex but no
+ * parentIterations. Grouped by immediate container -> iteration -> leaf.
+ * 2. **Nested** (new): spans have parentIterations chains. The outermost ancestor in the
+ * chain determines the top-level container. Iteration spans are peeled one level at a
+ * time and recursed.
+ */
+function groupIterationBlocksRecursive(
+ spans: TraceSpan[],
+ normalSpans: TraceSpan[],
+ counters: ContainerNameCounters
+): TraceSpan[] {
+ const result: TraceSpan[] = []
+ const iterationSpans: TraceSpan[] = []
+ const nonIterationSpans: TraceSpan[] = []
+
+ for (const span of spans) {
+ if (
+ span.name.match(/^(.+) \(iteration (\d+)\)$/) ||
+ (span.parentIterations && span.parentIterations.length > 0)
+ ) {
+ iterationSpans.push(span)
+ } else {
+ nonIterationSpans.push(span)
+ }
+ }
+
+ const containerIdsWithIterations = new Set<string>()
+ for (const span of iterationSpans) {
+ const outermost = getOutermostContainer(span)
+ if (outermost) containerIdsWithIterations.add(outermost.containerId)
+ }
+
+ const nonContainerSpans = nonIterationSpans.filter(
+ (span) =>
+ (span.type !== 'parallel' && span.type !== 'loop') ||
+ span.status === 'error' ||
+ (span.blockId && !containerIdsWithIterations.has(span.blockId))
+ )
+
+ if (iterationSpans.length === 0) {
+ result.push(...nonContainerSpans)
+ result.sort((a, b) => new Date(a.startTime).getTime() - new Date(b.startTime).getTime())
+ return result
+ }
+
+ const containerGroups = new Map<
+ string,
+ { type: 'parallel' | 'loop'; containerId: string; containerName: string; spans: TraceSpan[] }
+ >()
+
+ for (const span of iterationSpans) {
+ const outermost = getOutermostContainer(span)
+ if (!outermost) continue
+
+ const { containerId, containerType } = outermost
+ const groupKey = `${containerType}_${containerId}`
+
+ if (!containerGroups.has(groupKey)) {
+ const containerName = resolveContainerName(containerId, containerType, normalSpans, counters)
+ containerGroups.set(groupKey, {
+ type: containerType,
+ containerId,
+ containerName,
+ spans: [],
+ })
+ }
+ containerGroups.get(groupKey)!.spans.push(span)
+ }
+
+ for (const [, group] of containerGroups) {
+ const { type, containerId, containerName, spans: containerSpans } = group
+
+ const iterationChildren = buildContainerChildren(
+ type,
+ containerId,
+ containerSpans,
+ normalSpans,
+ counters
+ )
+
+ result.push(
+ buildContainerSpan({
+ id: `${type === 'parallel' ? 'parallel' : 'loop'}-execution-${containerId}`,
+ name: containerName,
+ type,
+ sourceSpans: containerSpans,
+ children: iterationChildren,
+ })
+ )
+ }
+
+ result.push(...nonContainerSpans)
+ result.sort((a, b) => new Date(a.startTime).getTime() - new Date(b.startTime).getTime())
+
+ return result
+}
+
+/**
+ * Groups iteration-based blocks (parallel and loop) by organizing their iteration spans
+ * into a hierarchical structure with proper parent-child relationships.
+ * Supports recursive nesting via parentIterations (e.g., parallel-in-parallel, loop-in-loop).
+ */
+export function groupIterationBlocks(spans: TraceSpan[]): TraceSpan[] {
+ const normalSpans = spans.filter((s) => !s.name.match(/^(.+) \(iteration (\d+)\)$/))
+ const counters: ContainerNameCounters = {
+ loopNumbers: new Map(),
+ parallelNumbers: new Map(),
+ loopCounter: 1,
+ parallelCounter: 1,
+ }
+ return groupIterationBlocksRecursive(spans, normalSpans, counters)
+}
diff --git a/apps/sim/lib/logs/execution/trace-spans/span-factory.ts b/apps/sim/lib/logs/execution/trace-spans/span-factory.ts
new file mode 100644
index 00000000000..1c3bc87a4b1
--- /dev/null
+++ b/apps/sim/lib/logs/execution/trace-spans/span-factory.ts
@@ -0,0 +1,385 @@
+import { createLogger } from '@sim/logger'
+import type { ProviderTiming, TraceSpan } from '@/lib/logs/types'
+import {
+ isConditionBlockType,
+ isWorkflowBlockType,
+ stripCustomToolPrefix,
+} from '@/executor/constants'
+import type {
+ BlockLog,
+ BlockToolCall,
+ NormalizedBlockOutput,
+ ProviderTimingSegment,
+} from '@/executor/types'
+
+const logger = createLogger('SpanFactory')
+
+const STREAMING_SEGMENT_NAME = 'Streaming response'
+
+/** A BlockLog that has already passed the id/type validity check. */
+type ValidBlockLog = BlockLog & { blockType: string }
+
+/**
+ * Creates a TraceSpan from a BlockLog. Returns null for invalid logs.
+ *
+ * Children are unified under `span.children` regardless of source:
+ * - Provider `timeSegments` become model/tool child spans with tool I/O merged in
+ * - `output.toolCalls` (no segments) become tool child spans
+ * - Child workflow spans are flattened into children
+ */
+export function createSpanFromLog(log: BlockLog): TraceSpan | null {
+ if (!log.blockId || !log.blockType) return null
+ const validLog = log as ValidBlockLog
+
+ const span = createBaseSpan(validLog)
+
+ if (!isConditionBlockType(validLog.blockType)) {
+ enrichWithProviderMetadata(span, validLog)
+
+ if (!isWorkflowBlockType(validLog.blockType)) {
+ const segments = validLog.output?.providerTiming?.timeSegments
+ span.children = segments
+ ? buildChildrenFromTimeSegments(span, validLog, segments)
+ : buildChildrenFromToolCalls(span, validLog)
+ }
+ }
+
+ if (isWorkflowBlockType(validLog.blockType)) {
+ attachChildWorkflowSpans(span, validLog)
+ }
+
+ return span
+}
+
+/** Creates the base span with id, name, type, timing, status, and metadata. */
+function createBaseSpan(log: ValidBlockLog): TraceSpan {
+ const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
+ const output = extractDisplayOutput(log)
+ const childIds = extractChildWorkflowIds(log.output)
+
+ return {
+ id: spanId,
+ name: log.blockName ?? log.blockId,
+ type: log.blockType,
+ duration: log.durationMs,
+ startTime: log.startedAt,
+ endTime: log.endedAt,
+ status: log.error ? 'error' : 'success',
+ children: [],
+ blockId: log.blockId,
+ input: log.input,
+ output,
+ ...(childIds ?? {}),
+ ...(log.errorHandled && { errorHandled: true }),
+ ...(log.loopId && { loopId: log.loopId }),
+ ...(log.parallelId && { parallelId: log.parallelId }),
+ ...(log.iterationIndex !== undefined && { iterationIndex: log.iterationIndex }),
+ ...(log.parentIterations?.length && { parentIterations: log.parentIterations }),
+ }
+}
+
+/**
+ * Strips internal fields from the block output for display and merges
+ * the block-level error into output so the UI renders it alongside data.
+ */
+function extractDisplayOutput(log: ValidBlockLog): Record<string, unknown> {
+ const { childWorkflowSnapshotId, childWorkflowId, ...rest } = log.output ?? {}
+ return log.error ? { ...rest, error: log.error } : rest
+}
+
+/** Pulls child-workflow identifiers off the output so they can live on the span. */
+function extractChildWorkflowIds(
+ output: NormalizedBlockOutput | undefined
+): { childWorkflowSnapshotId?: string; childWorkflowId?: string } | undefined {
+ if (!output) return undefined
+ const ids: { childWorkflowSnapshotId?: string; childWorkflowId?: string } = {}
+ if (typeof output.childWorkflowSnapshotId === 'string') {
+ ids.childWorkflowSnapshotId = output.childWorkflowSnapshotId
+ }
+ if (typeof output.childWorkflowId === 'string') {
+ ids.childWorkflowId = output.childWorkflowId
+ }
+ return ids.childWorkflowSnapshotId || ids.childWorkflowId ? ids : undefined
+}
+
+/** Enriches a span with provider timing, cost, tokens, and model from block output. */
+function enrichWithProviderMetadata(span: TraceSpan, log: ValidBlockLog): void {
+ const output = log.output
+ if (!output) return
+
+ if (output.providerTiming) {
+ const pt = output.providerTiming
+ const timing: ProviderTiming = {
+ duration: pt.duration,
+ startTime: pt.startTime,
+ endTime: pt.endTime,
+ segments: pt.timeSegments ?? [],
+ }
+ span.providerTiming = timing
+ }
+
+ if (output.cost) {
+ const { input, output: out, total } = output.cost
+ span.cost = { input, output: out, total }
+ }
+
+ if (output.tokens) {
+ const t = output.tokens
+ const input =
+ typeof t.input === 'number' ? t.input : typeof t.prompt === 'number' ? t.prompt : undefined
+ const outputTokens =
+ typeof t.output === 'number'
+ ? t.output
+ : typeof t.completion === 'number'
+ ? t.completion
+ : undefined
+ const totalExplicit = typeof t.total === 'number' ? t.total : undefined
+ const total =
+ totalExplicit ??
+ (input !== undefined || outputTokens !== undefined
+ ? (input ?? 0) + (outputTokens ?? 0)
+ : undefined)
+ span.tokens = {
+ ...(input !== undefined && { input }),
+ ...(outputTokens !== undefined && { output: outputTokens }),
+ ...(total !== undefined && { total }),
+ }
+ }
+
+ if (typeof output.model === 'string') {
+ span.model = output.model
+ }
+}
+
+/**
+ * Builds child spans from provider `timeSegments`, matching tool segments to
+ * their corresponding tool call I/O by name in sequential order.
+ */
+function buildChildrenFromTimeSegments(
+ span: TraceSpan,
+ log: ValidBlockLog,
+ segments: ProviderTimingSegment[]
+): TraceSpan[] {
+ const toolCallsByName = groupToolCallsByName(resolveToolCallsList(log.output))
+ const toolCallIndices = new Map<string, number>()
+
+ return segments.map((segment, index) => {
+ const segmentStartTime = new Date(segment.startTime).toISOString()
+ let segmentEndTime = new Date(segment.endTime).toISOString()
+ let segmentDuration = segment.duration
+
+ // Streaming segments sometimes close before the block ends; extend the
+ // trailing streaming segment to the block endTime so the bar fills.
+ if (segment.name === STREAMING_SEGMENT_NAME && log.endedAt) {
+ const blockEndMs = new Date(log.endedAt).getTime()
+ const segmentEndMs = new Date(segment.endTime).getTime()
+ if (blockEndMs > segmentEndMs) {
+ segmentEndTime = log.endedAt
+ segmentDuration = blockEndMs - new Date(segment.startTime).getTime()
+ }
+ }
+
+ if (segment.type === 'tool') {
+ const normalizedName = stripCustomToolPrefix(segment.name ?? '')
+ const callsForName = toolCallsByName.get(normalizedName) ?? []
+ const currentIndex = toolCallIndices.get(normalizedName) ?? 0
+ const match = callsForName[currentIndex]
+ toolCallIndices.set(normalizedName, currentIndex + 1)
+
+ const toolChild: TraceSpan = {
+ id: `${span.id}-segment-${index}`,
+ name: normalizedName,
+ type: 'tool',
+ duration: segment.duration,
+ startTime: segmentStartTime,
+ endTime: segmentEndTime,
+ status: match?.error || segment.errorMessage ? 'error' : 'success',
+ input: match?.arguments ?? match?.input,
+ output: match?.error
+ ? { error: match.error, ...(match.result ?? match.output ?? {}) }
+ : (match?.result ?? match?.output),
+ }
+ if (segment.toolCallId) toolChild.toolCallId = segment.toolCallId
+ if (segment.errorType) toolChild.errorType = segment.errorType
+ if (segment.errorMessage) toolChild.errorMessage = segment.errorMessage
+ return toolChild
+ }
+
+ const modelChild: TraceSpan = {
+ id: `${span.id}-segment-${index}`,
+ name: segment.name ?? 'Model',
+ type: 'model',
+ duration: segmentDuration,
+ startTime: segmentStartTime,
+ endTime: segmentEndTime,
+ status: segment.errorMessage ? 'error' : 'success',
+ }
+
+ if (segment.assistantContent) {
+ modelChild.output = { content: segment.assistantContent }
+ }
+ if (segment.thinkingContent) {
+ modelChild.thinking = segment.thinkingContent
+ }
+ if (segment.toolCalls && segment.toolCalls.length > 0) {
+ modelChild.modelToolCalls = segment.toolCalls
+ }
+ if (segment.finishReason) {
+ modelChild.finishReason = segment.finishReason
+ }
+ if (segment.tokens) {
+ modelChild.tokens = segment.tokens
+ }
+ if (segment.cost) {
+ modelChild.cost = segment.cost
+ }
+ if (typeof segment.ttft === 'number' && segment.ttft >= 0) {
+ modelChild.ttft = segment.ttft
+ }
+ if (span.model) {
+ modelChild.model = span.model
+ }
+ if (segment.provider) {
+ modelChild.provider = segment.provider
+ }
+ if (segment.errorType) {
+ modelChild.errorType = segment.errorType
+ }
+ if (segment.errorMessage) {
+ modelChild.errorMessage = segment.errorMessage
+ }
+
+ return modelChild
+ })
+}
+
+/**
+ * Builds tool-call child spans when the provider did not emit `timeSegments`.
+ * Each tool call becomes a full TraceSpan of `type: 'tool'`.
+ */
+function buildChildrenFromToolCalls(span: TraceSpan, log: ValidBlockLog): TraceSpan[] {
+ const toolCalls = resolveToolCallsList(log.output)
+ if (toolCalls.length === 0) return []
+
+ return toolCalls.map((tc, index) => {
+ const startTime = tc.startTime ?? log.startedAt
+ const endTime = tc.endTime ?? log.endedAt
+ return {
+ id: `${span.id}-tool-${index}`,
+ name: stripCustomToolPrefix(tc.name ?? 'unnamed-tool'),
+ type: 'tool',
+ duration: tc.duration ?? 0,
+ startTime,
+ endTime,
+ status: tc.error ? 'error' : 'success',
+ input: tc.arguments ?? tc.input,
+ output: tc.error
+ ? { error: tc.error, ...(tc.result ?? tc.output ?? {}) }
+ : (tc.result ?? tc.output),
+ }
+ })
+}
+
+/** Groups tool calls by their stripped name for sequential matching against segments. */
+function groupToolCallsByName(toolCalls: BlockToolCall[]): Map<string, BlockToolCall[]> {
+ const byName = new Map<string, BlockToolCall[]>()
+ for (const tc of toolCalls) {
+ const name = stripCustomToolPrefix(tc.name ?? '')
+ const list = byName.get(name)
+ if (list) list.push(tc)
+ else byName.set(name, [tc])
+ }
+ return byName
+}
+
+/**
+ * Resolves the tool calls list from block output. Providers write a normalized
+ * `{list, count}` container; a legacy streaming path embeds calls under
+ * `executionData.output.toolCalls`. The `Array.isArray` branches guard against
+ * persisted logs from before the container shape was normalized, where
+ * `toolCalls` was stored as a plain array — still observed in older DB rows.
+ */
+function resolveToolCallsList(output: NormalizedBlockOutput | undefined): BlockToolCall[] {
+ if (!output) return []
+
+ const direct = output.toolCalls
+ if (direct) {
+ if (Array.isArray(direct)) return direct
+ if (direct.list) return direct.list
+ logger.warn('Unexpected toolCalls shape on block output — no list extracted', {
+ shape: typeof direct,
+ })
+ return []
+ }
+
+ const legacy = (output.executionData as { output?: { toolCalls?: unknown } } | undefined)?.output
+ ?.toolCalls
+ if (!legacy) return []
+ if (Array.isArray(legacy)) return legacy as BlockToolCall[]
+ if (typeof legacy === 'object' && legacy !== null && 'list' in legacy) {
+ return ((legacy as { list?: BlockToolCall[] }).list ?? []) as BlockToolCall[]
+ }
+ logger.warn('Unexpected legacy executionData.output.toolCalls shape — no list extracted', {
+ shape: typeof legacy,
+ })
+ return []
+}
+
+/** Extracts and flattens child workflow trace spans into the parent span's children. */
+function attachChildWorkflowSpans(span: TraceSpan, log: ValidBlockLog): void {
+ const childTraceSpans = log.childTraceSpans ?? log.output?.childTraceSpans
+ if (!childTraceSpans?.length) return
+
+ span.children = flattenWorkflowChildren(childTraceSpans)
+ span.output = stripChildTraceSpansFromOutput(span.output)
+}
+
+/** True when a span is a synthetic workflow wrapper (no blockId). */
+function isSyntheticWorkflowWrapper(span: TraceSpan): boolean {
+ return span.type === 'workflow' && !span.blockId
+}
+
+/** Reads nested `childTraceSpans` off a span's output, or `[]` if absent. */
+function extractOutputChildren(output: TraceSpan['output']): TraceSpan[] {
+ const nested = (output as { childTraceSpans?: TraceSpan[] } | undefined)?.childTraceSpans
+ return Array.isArray(nested) ? nested : []
+}
+
+/** Returns a copy of `output` with `childTraceSpans` removed, or undefined unchanged. */
+function stripChildTraceSpansFromOutput(
+ output: TraceSpan['output']
+): TraceSpan['output'] | undefined {
+ if (!output || !('childTraceSpans' in output)) return output
+ const { childTraceSpans: _, ...rest } = output as Record<string, unknown>
+ return rest
+}
+
+/** Recursively flattens synthetic workflow wrappers, preserving real block spans. */
+function flattenWorkflowChildren(spans: TraceSpan[]): TraceSpan[] {
+ const flattened: TraceSpan[] = []
+
+ for (const span of spans) {
+ if (isSyntheticWorkflowWrapper(span)) {
+ if (span.children?.length) {
+ flattened.push(...flattenWorkflowChildren(span.children))
+ }
+ continue
+ }
+
+ const directChildren = span.children ?? []
+ const outputChildren = extractOutputChildren(span.output)
+ const allChildren = [...directChildren, ...outputChildren]
+
+ const nextSpan: TraceSpan = { ...span }
+ if (allChildren.length > 0) {
+ nextSpan.children = flattenWorkflowChildren(allChildren)
+ }
+ if (outputChildren.length > 0) {
+ nextSpan.output = stripChildTraceSpansFromOutput(nextSpan.output)
+ }
+
+ flattened.push(nextSpan)
+ }
+
+ return flattened
+}
diff --git a/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts b/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts
index dd226ee857a..f15ff6f2fc2 100644
--- a/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts
+++ b/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts
@@ -174,11 +174,12 @@ describe('buildTraceSpans', () => {
expect(traceSpans).toHaveLength(1)
const agentSpan = traceSpans[0]
expect(agentSpan.type).toBe('agent')
- expect(agentSpan.toolCalls).toBeDefined()
- expect(agentSpan.toolCalls).toHaveLength(2)
+ expect(agentSpan.children).toBeDefined()
+ expect(agentSpan.children).toHaveLength(2)
// Check first tool call
- const firstToolCall = agentSpan.toolCalls![0]
+ const firstToolCall = agentSpan.children![0]
+ expect(firstToolCall.type).toBe('tool')
expect(firstToolCall.name).toBe('test_tool') // custom_ prefix should be stripped
expect(firstToolCall.duration).toBe(1000)
expect(firstToolCall.status).toBe('success')
@@ -186,7 +187,8 @@ describe('buildTraceSpans', () => {
expect(firstToolCall.output).toEqual({ output: 'test output' })
// Check second tool call
- const secondToolCall = agentSpan.toolCalls![1]
+ const secondToolCall = agentSpan.children![1]
+ expect(secondToolCall.type).toBe('tool')
expect(secondToolCall.name).toBe('http_request')
expect(secondToolCall.duration).toBe(2000)
expect(secondToolCall.status).toBe('success')
@@ -238,10 +240,11 @@ describe('buildTraceSpans', () => {
expect(traceSpans).toHaveLength(1)
const agentSpan = traceSpans[0]
- expect(agentSpan.toolCalls).toBeDefined()
- expect(agentSpan.toolCalls).toHaveLength(1)
+ expect(agentSpan.children).toBeDefined()
+ expect(agentSpan.children).toHaveLength(1)
- const toolCall = agentSpan.toolCalls![0]
+ const toolCall = agentSpan.children![0]
+ expect(toolCall.type).toBe('tool')
expect(toolCall.name).toBe('serper_search')
expect(toolCall.duration).toBe(1500)
expect(toolCall.status).toBe('success')
@@ -293,10 +296,11 @@ describe('buildTraceSpans', () => {
expect(traceSpans).toHaveLength(1)
const agentSpan = traceSpans[0]
- expect(agentSpan.toolCalls).toBeDefined()
- expect(agentSpan.toolCalls).toHaveLength(1)
+ expect(agentSpan.children).toBeDefined()
+ expect(agentSpan.children).toHaveLength(1)
- const toolCall = agentSpan.toolCalls![0]
+ const toolCall = agentSpan.children![0]
+ expect(toolCall.type).toBe('tool')
expect(toolCall.name).toBe('analysis_tool') // custom_ prefix should be stripped
expect(toolCall.duration).toBe(2000)
expect(toolCall.status).toBe('success')
@@ -2082,4 +2086,124 @@ describe('nested subflow grouping via parentIterations', () => {
expect(parallel!.children).toHaveLength(2)
}
)
+
+ it.concurrent('propagates per-iteration segment content to model child spans', () => {
+ const result: ExecutionResult = {
+ success: true,
+ output: { content: 'final' },
+ logs: [
+ {
+ blockId: 'agent-1',
+ blockName: 'Agent',
+ blockType: 'agent',
+ startedAt: '2024-01-01T10:00:00.000Z',
+ endedAt: '2024-01-01T10:00:04.000Z',
+ durationMs: 4000,
+ success: true,
+ input: { userPrompt: 'hi' },
+ output: {
+ content: 'final',
+ model: 'claude-3-7-sonnet',
+ providerTiming: {
+ duration: 4000,
+ startTime: '2024-01-01T10:00:00.000Z',
+ endTime: '2024-01-01T10:00:04.000Z',
+ timeSegments: [
+ {
+ type: 'model',
+ name: 'claude-3-7-sonnet',
+ startTime: 1704103200000,
+ endTime: 1704103202000,
+ duration: 2000,
+ assistantContent: 'reasoning about request',
+ thinkingContent: 'let me think step by step',
+ toolCalls: [{ id: 'call_abc', name: 'lookup', arguments: { q: 'test' } }],
+ finishReason: 'tool_use',
+ tokens: { input: 100, output: 20, total: 120, cacheRead: 5, reasoning: 8 },
+ cost: { input: 0.001, output: 0.002, total: 0.003 },
+ ttft: 450,
+ provider: 'anthropic',
+ },
+ {
+ type: 'tool',
+ name: 'lookup',
+ startTime: 1704103202000,
+ endTime: 1704103203000,
+ duration: 1000,
+ toolCallId: 'call_abc',
+ errorType: 'TimeoutError',
+ errorMessage: 'tool timed out',
+ },
+ {
+ type: 'model',
+ name: 'claude-3-7-sonnet',
+ startTime: 1704103203000,
+ endTime: 1704103204000,
+ duration: 1000,
+ assistantContent: 'final answer',
+ finishReason: 'end_turn',
+ tokens: { input: 130, output: 10, total: 140 },
+ cost: { input: 0.002, output: 0.001, total: 0.003 },
+ provider: 'anthropic',
+ errorType: 'RateLimitError',
+ errorMessage: 'too many requests',
+ },
+ ],
+ },
+ toolCalls: {
+ list: [
+ {
+ name: 'lookup',
+ arguments: { q: 'test' },
+ result: { hit: true },
+ duration: 1000,
+ },
+ ],
+ count: 1,
+ },
+ },
+ },
+ ],
+ }
+
+ const { traceSpans } = buildTraceSpans(result)
+ const children = traceSpans[0].children!
+ expect(children).toHaveLength(3)
+
+ const [firstModel, tool, secondModel] = children
+
+ expect(firstModel.type).toBe('model')
+ expect(firstModel.output).toEqual({ content: 'reasoning about request' })
+ expect(firstModel.thinking).toBe('let me think step by step')
+ expect(firstModel.modelToolCalls).toEqual([
+ { id: 'call_abc', name: 'lookup', arguments: { q: 'test' } },
+ ])
+ expect(firstModel.finishReason).toBe('tool_use')
+ expect(firstModel.tokens).toEqual({
+ input: 100,
+ output: 20,
+ total: 120,
+ cacheRead: 5,
+ reasoning: 8,
+ })
+ expect(firstModel.cost).toEqual({ input: 0.001, output: 0.002, total: 0.003 })
+ expect(firstModel.ttft).toBe(450)
+ expect(firstModel.provider).toBe('anthropic')
+ expect(firstModel.status).toBe('success')
+
+ expect(tool.type).toBe('tool')
+ expect(tool.toolCallId).toBe('call_abc')
+ expect(tool.errorType).toBe('TimeoutError')
+ expect(tool.errorMessage).toBe('tool timed out')
+ expect(tool.status).toBe('error')
+
+ expect(secondModel.type).toBe('model')
+ expect(secondModel.output).toEqual({ content: 'final answer' })
+ expect(secondModel.thinking).toBeUndefined()
+ expect(secondModel.modelToolCalls).toBeUndefined()
+ expect(secondModel.finishReason).toBe('end_turn')
+ expect(secondModel.errorType).toBe('RateLimitError')
+ expect(secondModel.errorMessage).toBe('too many requests')
+ expect(secondModel.status).toBe('error')
+ })
})
diff --git a/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts b/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts
index f367058fd6f..1f2e2c9503a 100644
--- a/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts
+++ b/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts
@@ -1,14 +1,7 @@
-import { createLogger } from '@sim/logger'
-import type { ToolCall, TraceSpan } from '@/lib/logs/types'
-import {
- isConditionBlockType,
- isWorkflowBlockType,
- stripCustomToolPrefix,
-} from '@/executor/constants'
-import type { ExecutionResult } from '@/executor/types'
-import { stripCloneSuffixes } from '@/executor/utils/subflow-utils'
-
-const logger = createLogger('TraceSpans')
+import { groupIterationBlocks } from '@/lib/logs/execution/trace-spans/iteration-grouping'
+import { createSpanFromLog } from '@/lib/logs/execution/trace-spans/span-factory'
+import type { TraceSpan } from '@/lib/logs/types'
+import type { BlockLog, ExecutionResult } from '@/executor/types'
/**
* Keys that should be recursively filtered from output display.
@@ -43,820 +36,92 @@ export function filterHiddenOutputKeys(value: unknown): unknown {
return value
}
-function isSyntheticWorkflowWrapper(span: TraceSpan | undefined): boolean {
- if (!span || span.type !== 'workflow') return false
- return !span.blockId
-}
-
-function flattenWorkflowChildren(spans: TraceSpan[]): TraceSpan[] {
- const flattened: TraceSpan[] = []
-
- spans.forEach((span) => {
- if (isSyntheticWorkflowWrapper(span)) {
- if (span.children && Array.isArray(span.children)) {
- flattened.push(...flattenWorkflowChildren(span.children))
- }
- return
- }
-
- const processedSpan: TraceSpan = { ...span }
-
- const directChildren = Array.isArray(span.children) ? span.children : []
- const outputChildren =
- span.output &&
- typeof span.output === 'object' &&
- Array.isArray((span.output as { childTraceSpans?: TraceSpan[] }).childTraceSpans)
- ? ((span.output as { childTraceSpans?: TraceSpan[] }).childTraceSpans as TraceSpan[])
- : []
-
- const allChildren = [...directChildren, ...outputChildren]
- if (allChildren.length > 0) {
- processedSpan.children = flattenWorkflowChildren(allChildren)
- }
-
- if (outputChildren.length > 0 && processedSpan.output) {
- const { childTraceSpans: _, ...cleanOutput } = processedSpan.output as {
- childTraceSpans?: TraceSpan[]
- } & Record<string, unknown>
- processedSpan.output = cleanOutput
- }
-
- flattened.push(processedSpan)
- })
-
- return flattened
-}
-
+/**
+ * Builds a hierarchical trace span tree from execution logs.
+ *
+ * Pipeline:
+ * 1. Each BlockLog becomes a TraceSpan via `createSpanFromLog`.
+ * 2. Spans are sorted by start time to form a flat list of root spans.
+ * 3. Loop/parallel iterations are grouped into container spans via `groupIterationBlocks`.
+ * 4. A synthetic "Workflow Execution" root wraps the grouped spans and provides
+ * relative timestamps + total duration derived from the earliest start / latest end.
+ */
export function buildTraceSpans(result: ExecutionResult): {
traceSpans: TraceSpan[]
totalDuration: number
} {
- if (!result.logs || result.logs.length === 0) {
+ if (!result.logs?.length) {
return { traceSpans: [], totalDuration: 0 }
}
- const spanMap = new Map()
-
- const parentChildMap = new Map()
-
- type Connection = { source: string; target: string }
- const workflowConnections: Connection[] = result.metadata?.workflowConnections || []
- if (workflowConnections.length > 0) {
- workflowConnections.forEach((conn: Connection) => {
- if (conn.source && conn.target) {
- parentChildMap.set(conn.target, conn.source)
- }
- })
- }
-
- result.logs.forEach((log) => {
- if (!log.blockId || !log.blockType) return
-
- const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
- const isCondition = isConditionBlockType(log.blockType)
-
- const duration = log.durationMs || 0
-
- let output = log.output || {}
- let childWorkflowSnapshotId: string | undefined
- let childWorkflowId: string | undefined
-
- if (output && typeof output === 'object') {
- const outputRecord = output as Record<string, unknown>
- childWorkflowSnapshotId =
- typeof outputRecord.childWorkflowSnapshotId === 'string'
- ? outputRecord.childWorkflowSnapshotId
- : undefined
- childWorkflowId =
- typeof outputRecord.childWorkflowId === 'string' ? outputRecord.childWorkflowId : undefined
- if (childWorkflowSnapshotId || childWorkflowId) {
- const {
- childWorkflowSnapshotId: _childSnapshotId,
- childWorkflowId: _childWorkflowId,
- ...outputRest
- } = outputRecord
- output = outputRest
- }
- }
-
- if (log.error) {
- output = {
- ...output,
- error: log.error,
- }
- }
-
- const displayName = log.blockName || log.blockId
-
- const span: TraceSpan = {
- id: spanId,
- name: displayName,
- type: log.blockType,
- duration: duration,
- startTime: log.startedAt,
- endTime: log.endedAt,
- status: log.error ? 'error' : 'success',
- children: [],
- blockId: log.blockId,
- input: log.input || {},
- output: output,
- ...(childWorkflowSnapshotId ? { childWorkflowSnapshotId } : {}),
- ...(childWorkflowId ? { childWorkflowId } : {}),
- ...(log.errorHandled && { errorHandled: true }),
- ...(log.loopId && { loopId: log.loopId }),
- ...(log.parallelId && { parallelId: log.parallelId }),
- ...(log.iterationIndex !== undefined && { iterationIndex: log.iterationIndex }),
- ...(log.parentIterations?.length && { parentIterations: log.parentIterations }),
- }
-
- if (!isCondition && log.output?.providerTiming) {
- const providerTiming = log.output.providerTiming as {
- duration: number
- startTime: string
- endTime: string
- timeSegments?: Array<{
- type: string
- name?: string
- startTime: string | number
- endTime: string | number
- duration: number
- }>
- }
-
- span.providerTiming = {
- duration: providerTiming.duration,
- startTime: providerTiming.startTime,
- endTime: providerTiming.endTime,
- segments: providerTiming.timeSegments || [],
- }
- }
-
- if (!isCondition && log.output?.cost) {
- span.cost = log.output.cost as {
- input?: number
- output?: number
- total?: number
- }
- }
-
- if (!isCondition && log.output?.tokens) {
- const t = log.output.tokens as
- | number
- | {
- input?: number
- output?: number
- total?: number
- prompt?: number
- completion?: number
- }
- if (typeof t === 'number') {
- span.tokens = t
- } else if (typeof t === 'object') {
- const input = t.input ?? t.prompt
- const output = t.output ?? t.completion
- const total =
- t.total ??
- (typeof input === 'number' || typeof output === 'number'
- ? (input || 0) + (output || 0)
- : undefined)
- span.tokens = {
- ...(typeof input === 'number' ? { input } : {}),
- ...(typeof output === 'number' ? { output } : {}),
- ...(typeof total === 'number' ? { total } : {}),
- }
- } else {
- span.tokens = t
- }
- }
-
- if (!isCondition && log.output?.model) {
- span.model = log.output.model as string
- }
-
- if (
- !isWorkflowBlockType(log.blockType) &&
- !isCondition &&
- log.output?.providerTiming?.timeSegments &&
- Array.isArray(log.output.providerTiming.timeSegments)
- ) {
- const timeSegments = log.output.providerTiming.timeSegments
- const toolCallsData = log.output?.toolCalls?.list || log.output?.toolCalls || []
-
- const toolCallsByName = new Map<string, Array<Record<string, unknown>>>()
- for (const tc of toolCallsData as Array<{ name?: string; [key: string]: unknown }>) {
- const normalizedName = stripCustomToolPrefix(tc.name || '')
- if (!toolCallsByName.has(normalizedName)) {
- toolCallsByName.set(normalizedName, [])
- }
- toolCallsByName.get(normalizedName)!.push(tc)
- }
-
- const toolCallIndices = new Map()
-
- span.children = timeSegments.map(
- (
- segment: {
- type: string
- name?: string
- startTime: string | number
- endTime: string | number
- duration: number
- },
- index: number
- ) => {
- const segmentStartTime = new Date(segment.startTime).toISOString()
- let segmentEndTime = new Date(segment.endTime).toISOString()
- let segmentDuration = segment.duration
-
- if (segment.name?.toLowerCase().includes('streaming') && log.endedAt) {
- const blockEndTime = new Date(log.endedAt).getTime()
- const segmentEndTimeMs = new Date(segment.endTime).getTime()
-
- if (blockEndTime > segmentEndTimeMs) {
- segmentEndTime = log.endedAt
- segmentDuration = blockEndTime - new Date(segment.startTime).getTime()
- }
- }
-
- if (segment.type === 'tool') {
- const normalizedName = stripCustomToolPrefix(segment.name || '')
-
- const toolCallsForName = toolCallsByName.get(normalizedName) || []
- const currentIndex = toolCallIndices.get(normalizedName) || 0
- const matchingToolCall = toolCallsForName[currentIndex] as
- | {
- error?: string
- arguments?: Record<string, unknown>
- input?: Record<string, unknown>
- result?: Record<string, unknown>
- output?: Record<string, unknown>
- }
- | undefined
-
- toolCallIndices.set(normalizedName, currentIndex + 1)
-
- return {
- id: `${span.id}-segment-${index}`,
- name: normalizedName,
- type: 'tool',
- duration: segment.duration,
- startTime: segmentStartTime,
- endTime: segmentEndTime,
- status: matchingToolCall?.error ? 'error' : 'success',
- input: matchingToolCall?.arguments || matchingToolCall?.input,
- output: matchingToolCall?.error
- ? {
- error: matchingToolCall.error,
- ...(matchingToolCall.result || matchingToolCall.output || {}),
- }
- : matchingToolCall?.result || matchingToolCall?.output,
- }
- }
- return {
- id: `${span.id}-segment-${index}`,
- name: segment.name,
- type: 'model',
- duration: segmentDuration,
- startTime: segmentStartTime,
- endTime: segmentEndTime,
- status: 'success',
- }
- }
- )
- } else if (!isCondition) {
- let toolCallsList = null
-
- try {
- if (log.output?.toolCalls?.list) {
- toolCallsList = log.output.toolCalls.list
- } else if (Array.isArray(log.output?.toolCalls)) {
- toolCallsList = log.output.toolCalls
- } else if (log.output?.executionData?.output?.toolCalls) {
- const tcObj = log.output.executionData.output.toolCalls
- toolCallsList = Array.isArray(tcObj) ? tcObj : tcObj.list || []
- }
-
- if (toolCallsList && !Array.isArray(toolCallsList)) {
- logger.warn(`toolCallsList is not an array: ${typeof toolCallsList}`, {
- blockId: log.blockId,
- blockType: log.blockType,
- })
- toolCallsList = []
- }
- } catch (error) {
- logger.error(`Error extracting toolCalls from block ${log.blockId}:`, error)
- toolCallsList = []
- }
-
- if (toolCallsList && toolCallsList.length > 0) {
- const processedToolCalls: ToolCall[] = []
-
- for (const tc of toolCallsList as Array<{
- name?: string
- duration?: number
- startTime?: string
- endTime?: string
- error?: string
- arguments?: Record<string, unknown>
- input?: Record<string, unknown>
- result?: Record<string, unknown>
- output?: Record<string, unknown>
- }>) {
- if (!tc) continue
-
- try {
- const toolCall: ToolCall = {
- name: stripCustomToolPrefix(tc.name || 'unnamed-tool'),
- duration: tc.duration || 0,
- startTime: tc.startTime || log.startedAt,
- endTime: tc.endTime || log.endedAt,
- status: tc.error ? 'error' : 'success',
- }
-
- if (tc.arguments || tc.input) {
- toolCall.input = tc.arguments || tc.input
- }
-
- if (tc.result || tc.output) {
- toolCall.output = tc.result || tc.output
- }
-
- if (tc.error) {
- toolCall.error = tc.error
- }
-
- processedToolCalls.push(toolCall)
- } catch (tcError) {
- logger.error(`Error processing tool call in block ${log.blockId}:`, tcError)
- }
- }
-
- span.toolCalls = processedToolCalls
- }
- }
-
- if (isWorkflowBlockType(log.blockType)) {
- const childTraceSpans = Array.isArray(log.childTraceSpans)
- ? log.childTraceSpans
- : Array.isArray(log.output?.childTraceSpans)
- ? (log.output.childTraceSpans as TraceSpan[])
- : null
-
- if (childTraceSpans) {
- const flattenedChildren = flattenWorkflowChildren(childTraceSpans)
- span.children = flattenedChildren
-
- if (span.output && typeof span.output === 'object' && 'childTraceSpans' in span.output) {
- const { childTraceSpans: _, ...cleanOutput } = span.output as {
- childTraceSpans?: TraceSpan[]
- } & Record<string, unknown>
- span.output = cleanOutput
- }
- }
- }
-
- spanMap.set(spanId, span)
- })
-
- const sortedLogs = [...result.logs].sort((a, b) => {
- const aTime = new Date(a.startedAt).getTime()
- const bTime = new Date(b.startedAt).getTime()
- return aTime - bTime
- })
-
- const rootSpans: TraceSpan[] = []
-
- sortedLogs.forEach((log) => {
- if (!log.blockId) return
-
- const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
- const span = spanMap.get(spanId)
- if (span) {
- rootSpans.push(span)
- }
- })
-
- if (rootSpans.length === 0 && workflowConnections.length === 0) {
- const spanStack: TraceSpan[] = []
-
- sortedLogs.forEach((log) => {
- if (!log.blockId || !log.blockType) return
-
- const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
- const span = spanMap.get(spanId)
- if (!span) return
-
- if (spanStack.length > 0) {
- const potentialParent = spanStack[spanStack.length - 1]
- const parentStartTime = new Date(potentialParent.startTime).getTime()
- const parentEndTime = new Date(potentialParent.endTime).getTime()
- const spanStartTime = new Date(span.startTime).getTime()
-
- if (spanStartTime >= parentStartTime && spanStartTime <= parentEndTime) {
- if (!potentialParent.children) potentialParent.children = []
- potentialParent.children.push(span)
- } else {
- while (
- spanStack.length > 0 &&
- new Date(spanStack[spanStack.length - 1].endTime).getTime() < spanStartTime
- ) {
- spanStack.pop()
- }
+ const spans = buildRootSpansFromLogs(result.logs)
+ const grouped = groupIterationBlocks(spans)
- if (spanStack.length > 0) {
- const newParent = spanStack[spanStack.length - 1]
- if (!newParent.children) newParent.children = []
- newParent.children.push(span)
- } else {
- rootSpans.push(span)
- }
- }
- } else {
- rootSpans.push(span)
- }
-
- if (log.blockType === 'agent' || isWorkflowBlockType(log.blockType)) {
- spanStack.push(span)
- }
- })
+ if (grouped.length === 0 || !result.metadata) {
+ const totalDuration = grouped.reduce((sum, span) => sum + span.duration, 0)
+ return { traceSpans: grouped, totalDuration }
}
- const groupedRootSpans = groupIterationBlocks(rootSpans)
-
- const totalDuration = groupedRootSpans.reduce((sum, span) => sum + span.duration, 0)
-
- if (groupedRootSpans.length > 0 && result.metadata) {
- const allSpansList = Array.from(spanMap.values())
-
- const earliestStart = allSpansList.reduce((earliest, span) => {
- const startTime = new Date(span.startTime).getTime()
- return startTime < earliest ? startTime : earliest
- }, Number.POSITIVE_INFINITY)
-
- const latestEnd = allSpansList.reduce((latest, span) => {
- const endTime = new Date(span.endTime).getTime()
- return endTime > latest ? endTime : latest
- }, 0)
-
- const actualWorkflowDuration = latestEnd - earliestStart
-
- const addRelativeTimestamps = (spans: TraceSpan[], workflowStartMs: number) => {
- spans.forEach((span) => {
- span.relativeStartMs = new Date(span.startTime).getTime() - workflowStartMs
- if (span.children && span.children.length > 0) {
- addRelativeTimestamps(span.children, workflowStartMs)
- }
- })
- }
- addRelativeTimestamps(groupedRootSpans, earliestStart)
-
- const checkForUnhandledErrors = (s: TraceSpan): boolean => {
- if (s.status === 'error' && !s.errorHandled) return true
- return s.children ? s.children.some(checkForUnhandledErrors) : false
- }
- const hasUnhandledErrors = groupedRootSpans.some(checkForUnhandledErrors)
-
- const workflowSpan: TraceSpan = {
- id: 'workflow-execution',
- name: 'Workflow Execution',
- type: 'workflow',
- duration: actualWorkflowDuration, // Always use actual duration for the span
- startTime: new Date(earliestStart).toISOString(),
- endTime: new Date(latestEnd).toISOString(),
- status: hasUnhandledErrors ? 'error' : 'success',
- children: groupedRootSpans,
- }
+ return wrapInWorkflowRoot(grouped, spans)
+}
- return { traceSpans: [workflowSpan], totalDuration: actualWorkflowDuration }
+/** Converts each BlockLog into a TraceSpan, sorted chronologically by start time. */
+function buildRootSpansFromLogs(logs: BlockLog[]): TraceSpan[] {
+ const spans: TraceSpan[] = []
+ for (const log of logs) {
+ const span = createSpanFromLog(log)
+ if (span) spans.push(span)
}
-
- return { traceSpans: groupedRootSpans, totalDuration }
+ spans.sort((a, b) => new Date(a.startTime).getTime() - new Date(b.startTime).getTime())
+ return spans
}
/**
- * Builds a container-level TraceSpan (iteration wrapper or top-level container)
- * from its source spans and resolved children.
+ * Wraps grouped spans in a synthetic workflow-execution root span using the
+ * true workflow bounds (earliest start / latest end across all leaf spans).
*/
-function buildContainerSpan(opts: {
- id: string
- name: string
- type: string
- sourceSpans: TraceSpan[]
- children: TraceSpan[]
-}): TraceSpan {
- const startTimes = opts.sourceSpans.map((s) => new Date(s.startTime).getTime())
- const endTimes = opts.sourceSpans.map((s) => new Date(s.endTime).getTime())
- const earliestStart = Math.min(...startTimes)
- const latestEnd = Math.max(...endTimes)
-
- const hasErrors = opts.sourceSpans.some((s) => s.status === 'error')
- const allErrorsHandled =
- hasErrors && opts.children.every((s) => s.status !== 'error' || s.errorHandled)
-
- return {
- id: opts.id,
- name: opts.name,
- type: opts.type,
- duration: latestEnd - earliestStart,
+function wrapInWorkflowRoot(
+ grouped: TraceSpan[],
+ leafSpans: TraceSpan[]
+): { traceSpans: TraceSpan[]; totalDuration: number } {
+ let earliestStart = Number.POSITIVE_INFINITY
+ let latestEnd = 0
+ for (const span of leafSpans) {
+ const startTime = new Date(span.startTime).getTime()
+ const endTime = new Date(span.endTime).getTime()
+ if (startTime < earliestStart) earliestStart = startTime
+ if (endTime > latestEnd) latestEnd = endTime
+ }
+
+ const actualWorkflowDuration = latestEnd - earliestStart
+ addRelativeTimestamps(grouped, earliestStart)
+
+ const workflowSpan: TraceSpan = {
+ id: 'workflow-execution',
+ name: 'Workflow Execution',
+ type: 'workflow',
+ duration: actualWorkflowDuration,
startTime: new Date(earliestStart).toISOString(),
endTime: new Date(latestEnd).toISOString(),
- status: hasErrors ? 'error' : 'success',
- ...(allErrorsHandled && { errorHandled: true }),
- children: opts.children,
+ status: grouped.some(hasUnhandledError) ? 'error' : 'success',
+ children: grouped,
}
-}
-/** Counter state for generating sequential container names. */
-interface ContainerNameCounters {
- loopNumbers: Map
- parallelNumbers: Map
- loopCounter: number
- parallelCounter: number
+ return { traceSpans: [workflowSpan], totalDuration: actualWorkflowDuration }
}
-/**
- * Resolves a container name from normal (non-iteration) spans or assigns a sequential number.
- * Strips clone suffixes so all clones of the same container share one name/number.
- */
-function resolveContainerName(
- containerId: string,
- containerType: 'parallel' | 'loop',
- normalSpans: TraceSpan[],
- counters: ContainerNameCounters
-): string {
- const originalId = stripCloneSuffixes(containerId)
-
- const matchingBlock = normalSpans.find(
- (s) => s.blockId === originalId && s.type === containerType
- )
- if (matchingBlock?.name) return matchingBlock.name
-
- if (containerType === 'parallel') {
- if (!counters.parallelNumbers.has(originalId)) {
- counters.parallelNumbers.set(originalId, counters.parallelCounter++)
- }
- return `Parallel ${counters.parallelNumbers.get(originalId)}`
- }
- if (!counters.loopNumbers.has(originalId)) {
- counters.loopNumbers.set(originalId, counters.loopCounter++)
- }
- return `Loop ${counters.loopNumbers.get(originalId)}`
-}
-
-/**
- * Classifies a span's immediate container ID and type from its metadata.
- * Returns undefined for non-iteration spans.
- */
-function classifySpanContainer(
- span: TraceSpan
-): { containerId: string; containerType: 'parallel' | 'loop' } | undefined {
- if (span.parallelId) {
- return { containerId: span.parallelId, containerType: 'parallel' }
- }
- if (span.loopId) {
- return { containerId: span.loopId, containerType: 'loop' }
- }
- // Fallback: parse from blockId for legacy data
- if (span.blockId?.includes('_parallel_')) {
- const match = span.blockId.match(/_parallel_([^_]+)_iteration_/)
- if (match) {
- return { containerId: match[1], containerType: 'parallel' }
- }
- }
- return undefined
-}
-
-/**
- * Finds the outermost container for a span. For nested spans, this is parentIterations[0].
- * For flat spans, this is the span's own immediate container.
- */
-function getOutermostContainer(
- span: TraceSpan
-): { containerId: string; containerType: 'parallel' | 'loop' } | undefined {
- if (span.parentIterations && span.parentIterations.length > 0) {
- const outermost = span.parentIterations[0]
- return {
- containerId: outermost.iterationContainerId,
- containerType: outermost.iterationType as 'parallel' | 'loop',
- }
- }
- return classifySpanContainer(span)
-}
-
-/**
- * Builds the iteration-level hierarchy for a container, recursively nesting
- * any deeper subflows. Works with both:
- * - Direct spans (spans whose immediate container matches)
- * - Nested spans (spans with parentIterations pointing through this container)
- */
-function buildContainerChildren(
- containerType: 'parallel' | 'loop',
- containerId: string,
- spans: TraceSpan[],
- normalSpans: TraceSpan[],
- counters: ContainerNameCounters
-): TraceSpan[] {
- const iterationType = containerType === 'parallel' ? 'parallel-iteration' : 'loop-iteration'
-
- // Group spans by iteration index at this level.
- // Each span's iteration index at this level comes from:
- // - parentIterations[0].iterationCurrent if parentIterations[0].containerId === containerId
- // - span.iterationIndex if span's immediate container === containerId
- const iterationGroups = new Map()
-
- for (const span of spans) {
- let iterIdx: number | undefined
-
- if (
- span.parentIterations &&
- span.parentIterations.length > 0 &&
- span.parentIterations[0].iterationContainerId === containerId
- ) {
- iterIdx = span.parentIterations[0].iterationCurrent
- } else {
- // The span's immediate container is this container
- iterIdx = span.iterationIndex
- }
-
- if (iterIdx === undefined) continue
-
- if (!iterationGroups.has(iterIdx)) iterationGroups.set(iterIdx, [])
- iterationGroups.get(iterIdx)!.push(span)
- }
-
- const iterationChildren: TraceSpan[] = []
- const sortedIterations = Array.from(iterationGroups.entries()).sort(([a], [b]) => a - b)
-
- for (const [iterationIndex, iterSpans] of sortedIterations) {
- // For each span in this iteration, strip one level of ancestry and determine
- // whether it belongs to this container directly or to a deeper subflow
- const directLeaves: TraceSpan[] = []
- const deeperSpans: TraceSpan[] = []
-
- for (const span of iterSpans) {
- if (
- span.parentIterations &&
- span.parentIterations.length > 0 &&
- span.parentIterations[0].iterationContainerId === containerId
- ) {
- // Strip the outermost parentIteration (this container level)
- deeperSpans.push({
- ...span,
- parentIterations: span.parentIterations.slice(1),
- })
- } else {
- // This span's immediate container IS this container — it's a direct leaf
- directLeaves.push({
- ...span,
- name: span.name.replace(/ \(iteration \d+\)$/, ''),
- })
- }
- }
-
- // Recursively group the deeper spans (they'll form nested containers)
- const nestedResult = groupIterationBlocksRecursive(
- [...directLeaves, ...deeperSpans],
- normalSpans,
- counters
- )
-
- iterationChildren.push(
- buildContainerSpan({
- id: `${containerId}-iteration-${iterationIndex}`,
- name: `Iteration ${iterationIndex}`,
- type: iterationType,
- sourceSpans: iterSpans,
- children: nestedResult,
- })
- )
- }
-
- return iterationChildren
-}
-
-/**
- * Core recursive algorithm for grouping iteration blocks.
- *
- * Handles two cases:
- * 1. **Flat** (backward compat): spans have loopId/parallelId + iterationIndex but no
- * parentIterations. Grouped by immediate container → iteration → leaf.
- * 2. **Nested** (new): spans have parentIterations chains. The outermost ancestor in the
- * chain determines the top-level container. Iteration spans are peeled one level at a
- * time and recursed.
- *
- * Container BlockLogs (parallel/loop) are produced on skip (empty collection), error, and
- * successful completion. When present, they supply the user-configured container name via
- * `resolveContainerName`; otherwise the container is synthesized from iteration data with a
- * counter-based fallback name.
- */
-function groupIterationBlocksRecursive(
- spans: TraceSpan[],
- normalSpans: TraceSpan[],
- counters: ContainerNameCounters
-): TraceSpan[] {
- const result: TraceSpan[] = []
- const iterationSpans: TraceSpan[] = []
- const nonIterationSpans: TraceSpan[] = []
-
+/** Recursively annotates spans with `relativeStartMs` (ms since workflow start). */
+function addRelativeTimestamps(spans: TraceSpan[], workflowStartMs: number): void {
for (const span of spans) {
- if (
- span.name.match(/^(.+) \(iteration (\d+)\)$/) ||
- (span.parentIterations && span.parentIterations.length > 0)
- ) {
- iterationSpans.push(span)
- } else {
- nonIterationSpans.push(span)
- }
- }
-
- const containerIdsWithIterations = new Set<string>()
- for (const span of iterationSpans) {
- const outermost = getOutermostContainer(span)
- if (outermost) containerIdsWithIterations.add(outermost.containerId)
- }
-
- const nonContainerSpans = nonIterationSpans.filter(
- (span) =>
- (span.type !== 'parallel' && span.type !== 'loop') ||
- span.status === 'error' ||
- (span.blockId && !containerIdsWithIterations.has(span.blockId))
- )
-
- if (iterationSpans.length === 0) {
- result.push(...nonContainerSpans)
- result.sort((a, b) => new Date(a.startTime).getTime() - new Date(b.startTime).getTime())
- return result
- }
-
- // Group iteration spans by outermost container
- const containerGroups = new Map<
- string,
- { type: 'parallel' | 'loop'; containerId: string; containerName: string; spans: TraceSpan[] }
- >()
-
- for (const span of iterationSpans) {
- const outermost = getOutermostContainer(span)
- if (!outermost) continue
-
- const { containerId, containerType } = outermost
- const groupKey = `${containerType}_${containerId}`
-
- if (!containerGroups.has(groupKey)) {
- const containerName = resolveContainerName(containerId, containerType, normalSpans, counters)
- containerGroups.set(groupKey, {
- type: containerType,
- containerId,
- containerName,
- spans: [],
- })
+ span.relativeStartMs = new Date(span.startTime).getTime() - workflowStartMs
+ if (span.children?.length) {
+ addRelativeTimestamps(span.children, workflowStartMs)
}
- containerGroups.get(groupKey)!.spans.push(span)
- }
-
- // Build each container with recursive nesting
- for (const [, group] of containerGroups) {
- const { type, containerId, containerName, spans: containerSpans } = group
-
- const iterationChildren = buildContainerChildren(
- type,
- containerId,
- containerSpans,
- normalSpans,
- counters
- )
-
- result.push(
- buildContainerSpan({
- id: `${type === 'parallel' ? 'parallel' : 'loop'}-execution-${containerId}`,
- name: containerName,
- type,
- sourceSpans: containerSpans,
- children: iterationChildren,
- })
- )
}
-
- result.push(...nonContainerSpans)
- result.sort((a, b) => new Date(a.startTime).getTime() - new Date(b.startTime).getTime())
-
- return result
}
-/**
- * Groups iteration-based blocks (parallel and loop) by organizing their iteration spans
- * into a hierarchical structure with proper parent-child relationships.
- * Supports recursive nesting via parentIterations (e.g., parallel-in-parallel, loop-in-loop).
- *
- * @param spans - Array of root spans to process
- * @returns Array of spans with iteration blocks properly grouped
- */
-function groupIterationBlocks(spans: TraceSpan[]): TraceSpan[] {
- const normalSpans = spans.filter((s) => !s.name.match(/^(.+) \(iteration (\d+)\)$/))
- const counters: ContainerNameCounters = {
- loopNumbers: new Map(),
- parallelNumbers: new Map(),
- loopCounter: 1,
- parallelCounter: 1,
- }
- return groupIterationBlocksRecursive(spans, normalSpans, counters)
+/** True if this span (or any descendant) has an unhandled error. */
+function hasUnhandledError(span: TraceSpan): boolean {
+ if (span.status === 'error' && !span.errorHandled) return true
+ return span.children?.some(hasUnhandledError) ?? false
}
diff --git a/apps/sim/lib/logs/types.ts b/apps/sim/lib/logs/types.ts
index 20f568ab41c..e64fc91d56c 100644
--- a/apps/sim/lib/logs/types.ts
+++ b/apps/sim/lib/logs/types.ts
@@ -1,7 +1,13 @@
import type { Edge } from 'reactflow'
import type { AsyncExecutionCorrelation } from '@/lib/core/async-jobs/types'
import type { ParentIteration, SerializableExecutionState } from '@/executor/execution/types'
-import type { BlockLog, NormalizedBlockOutput } from '@/executor/types'
+import type {
+ BlockLog,
+ BlockTokens,
+ IterationToolCall,
+ NormalizedBlockOutput,
+ ProviderTimingSegment,
+} from '@/executor/types'
import type { Loop, Parallel, WorkflowState } from '@/stores/workflows/workflow/types'
export type { WorkflowState, Loop, Parallel }
@@ -149,6 +155,7 @@ export interface WorkflowExecutionLog {
>
executionState?: SerializableExecutionState
finalOutput?: any
+ workflowInput?: unknown
errorDetails?: {
blockId: string
blockName: string
@@ -179,25 +186,13 @@ export interface WorkflowExecutionLog {
export type WorkflowExecutionLogInsert = Omit
export type WorkflowExecutionLogSelect = WorkflowExecutionLog
-export interface TokenInfo {
- input?: number
- output?: number
- total?: number
- prompt?: number
- completion?: number
-}
+export type TokenInfo = BlockTokens
export interface ProviderTiming {
duration: number
startTime: string
endTime: string
- segments: Array<{
- type: string
- name?: string
- startTime: string | number
- endTime: string | number
- duration: number
- }>
+ segments: ProviderTimingSegment[]
}
export interface TraceSpan {
@@ -208,11 +203,15 @@ export interface TraceSpan {
startTime: string
endTime: string
children?: TraceSpan[]
+ /**
+ * @deprecated Tool invocations are emitted as `children` with `type: 'tool'`.
+ * This field only appears on legacy trace spans persisted before the unification.
+ */
toolCalls?: ToolCall[]
status?: 'success' | 'error'
/** Whether this block's error was handled by an error handler path */
errorHandled?: boolean
- tokens?: number | TokenInfo
+ tokens?: TokenInfo
relativeStartMs?: number
blockId?: string
input?: Record<string, any>
@@ -230,6 +229,43 @@ export interface TraceSpan {
parallelId?: string
iterationIndex?: number
parentIterations?: ParentIteration[]
+ /**
+ * For model child spans: the assistant's thinking/reasoning blocks from this
+ * iteration, stringified. Surfaces Anthropic extended thinking and equivalents.
+ */
+ thinking?: string
+ /**
+ * For model child spans: the tool calls the assistant requested in this
+ * iteration. `id` is the provider-assigned `tool_call.id`, used to correlate
+ * the following tool child span via its `toolCallId` field.
+ */
+ modelToolCalls?: IterationToolCall[]
+ /**
+ * For model child spans: the provider-reported stop reason
+ * (`stop`, `tool_use`, `length`, …).
+ */
+ finishReason?: string
+ /**
+ * For tool child spans: the `tool_call.id` this tool invocation satisfies.
+ * Matches one of the preceding model child's `modelToolCalls[i].id`.
+ */
+ toolCallId?: string
+ /**
+ * For model child spans: time-to-first-token in ms (streaming runs only).
+ */
+ ttft?: number
+ /**
+ * For model child spans: the provider system identifier
+ * (`anthropic`, `openai`, `gemini`, …) — aligns with OTel `gen_ai.system`.
+ */
+ provider?: string
+ /**
+ * For failed child spans: structured error class
+ * (e.g. `rate_limit`, `context_length`).
+ */
+ errorType?: string
+ /** For failed child spans: human-readable error message. */
+ errorMessage?: string
}
export interface WorkflowExecutionSummary {
diff --git a/apps/sim/lib/tokenization/streaming.ts b/apps/sim/lib/tokenization/streaming.ts
index 047fd0b8b38..ca552fa8292 100644
--- a/apps/sim/lib/tokenization/streaming.ts
+++ b/apps/sim/lib/tokenization/streaming.ts
@@ -49,13 +49,19 @@ export function processStreamingBlockLog(log: BlockLog, streamedContent: string)
const inputText = extractTextContent(log.input)
// Calculate streaming cost
+ const systemPrompt =
+ typeof log.input?.systemPrompt === 'string' ? log.input.systemPrompt : undefined
+ const context = typeof log.input?.context === 'string' ? log.input.context : undefined
+ const messages = Array.isArray(log.input?.messages)
+ ? (log.input.messages as Array<{ role: string; content: string }>)
+ : undefined
const result = calculateStreamingCost(
model,
inputText,
streamedContent,
- log.input?.systemPrompt,
- log.input?.context,
- log.input?.messages
+ systemPrompt,
+ context,
+ messages
)
// Update the log output with tokenization data
@@ -102,8 +108,9 @@ function getModelForBlock(log: BlockLog): string {
}
// Try to get model from input
- if (log.input?.model?.trim()) {
- return log.input.model
+ const inputModel = log.input?.model
+ if (typeof inputModel === 'string' && inputModel.trim()) {
+ return inputModel
}
// Use block type specific defaults
diff --git a/apps/sim/lib/tokenization/utils.ts b/apps/sim/lib/tokenization/utils.ts
index e3c3c3287d0..72bcf9ac420 100644
--- a/apps/sim/lib/tokenization/utils.ts
+++ b/apps/sim/lib/tokenization/utils.ts
@@ -11,6 +11,7 @@ import {
} from '@/lib/tokenization/constants'
import { createTokenizationError } from '@/lib/tokenization/errors'
import type { ProviderTokenizationConfig, TokenUsage } from '@/lib/tokenization/types'
+import type { BlockTokens } from '@/executor/types'
import { getProviderFromModel } from '@/providers/utils'
const logger = createLogger('TokenizationUtils')
@@ -56,9 +57,11 @@ export function isTokenizableBlockType(blockType?: string): boolean {
/**
* Checks if tokens/cost data is meaningful (non-zero)
*/
-export function hasRealTokenData(tokens?: TokenUsage): boolean {
+export function hasRealTokenData(
+ tokens?: Pick<BlockTokens, 'input' | 'output' | 'total'>
+): boolean {
if (!tokens) return false
- return tokens.total > 0 || tokens.input > 0 || tokens.output > 0
+ return (tokens.total ?? 0) > 0 || (tokens.input ?? 0) > 0 || (tokens.output ?? 0) > 0
}
/**
diff --git a/apps/sim/providers/anthropic/core.ts b/apps/sim/providers/anthropic/core.ts
index c51d1420188..bda5c2f6f4a 100644
--- a/apps/sim/providers/anthropic/core.ts
+++ b/apps/sim/providers/anthropic/core.ts
@@ -3,7 +3,7 @@ import { transformJSONSchema } from '@anthropic-ai/sdk/lib/transform-json-schema
import type { RawMessageStreamEvent } from '@anthropic-ai/sdk/resources/messages/messages'
import type { Logger } from '@sim/logger'
import { toError } from '@sim/utils/errors'
-import type { StreamingExecution } from '@/executor/types'
+import type { BlockTokens, IterationToolCall, StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
@@ -15,6 +15,7 @@ import {
supportsNativeStructuredOutputs,
supportsTemperature,
} from '@/providers/models'
+import { enrichLastModelSegment } from '@/providers/trace-enrichment'
import type { ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types'
import { ProviderError } from '@/providers/types'
import {
@@ -446,7 +447,7 @@ export async function executeAnthropicProviderRequest(
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -516,7 +517,7 @@ export async function executeAnthropicProviderRequest(
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -546,6 +547,11 @@ export async function executeAnthropicProviderRequest(
}
const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use')
+
+ enrichLastModelSegmentFromAnthropicResponse(timeSegments, currentResponse, textContent, {
+ model: request.model,
+ })
+
if (!toolUses || toolUses.length === 0) {
break
}
@@ -622,6 +628,7 @@ export async function executeAnthropicProviderRequest(
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolUse.id,
})
let resultContent: unknown
@@ -751,7 +758,7 @@ export async function executeAnthropicProviderRequest(
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -768,6 +775,16 @@ export async function executeAnthropicProviderRequest(
iterationCount++
}
+
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ const trailingText = currentResponse.content
+ .filter((item) => item.type === 'text')
+ .map((item) => item.text)
+ .join('\n')
+ enrichLastModelSegmentFromAnthropicResponse(timeSegments, currentResponse, trailingText, {
+ model: request.model,
+ })
+ }
} catch (error) {
logger.error(`Error in ${providerLabel} request:`, { error })
throw error
@@ -930,7 +947,7 @@ export async function executeAnthropicProviderRequest(
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -960,6 +977,11 @@ export async function executeAnthropicProviderRequest(
}
const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use')
+
+ enrichLastModelSegmentFromAnthropicResponse(timeSegments, currentResponse, textContent, {
+ model: request.model,
+ })
+
if (!toolUses || toolUses.length === 0) {
break
}
@@ -1038,6 +1060,7 @@ export async function executeAnthropicProviderRequest(
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolUseId,
})
let resultContent: unknown
@@ -1165,7 +1188,7 @@ export async function executeAnthropicProviderRequest(
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -1191,6 +1214,16 @@ export async function executeAnthropicProviderRequest(
iterationCount++
}
+
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ const trailingText = currentResponse.content
+ .filter((item) => item.type === 'text')
+ .map((item) => item.text)
+ .join('\n')
+ enrichLastModelSegmentFromAnthropicResponse(timeSegments, currentResponse, trailingText, {
+ model: request.model,
+ })
+ }
} catch (error) {
logger.error(`Error in ${providerLabel} request:`, { error })
throw error
@@ -1336,3 +1369,87 @@ export async function executeAnthropicProviderRequest(
})
}
}
+
+/**
+ * Enriches the last model segment with content from an Anthropic `Message`:
+ * assistant text, thinking/redacted_thinking blocks, tool_use calls (with IDs),
+ * stop_reason, and per-iteration tokens.
+ */
+function enrichLastModelSegmentFromAnthropicResponse(
+ timeSegments: TimeSegment[],
+ response: Anthropic.Messages.Message,
+ textContent: string,
+ extras?: {
+ model?: string
+ ttft?: number
+ errorType?: string
+ errorMessage?: string
+ }
+): void {
+ const thinkingBlocks = response.content.filter(
+ (item): item is Anthropic.Messages.ThinkingBlock | Anthropic.Messages.RedactedThinkingBlock =>
+ item.type === 'thinking' || item.type === 'redacted_thinking'
+ )
+ const thinkingContent = thinkingBlocks
+ .map((b) => (b.type === 'thinking' ? b.thinking : '[redacted]'))
+ .join('\n\n')
+
+ const toolUseBlocks = response.content.filter(
+ (item): item is Anthropic.Messages.ToolUseBlock => item.type === 'tool_use'
+ )
+ const toolCalls: IterationToolCall[] = toolUseBlocks.map((t) => ({
+ id: t.id,
+ name: t.name,
+ arguments:
+ t.input && typeof t.input === 'object' && !Array.isArray(t.input)
+ ? (t.input as Record)
+ : {},
+ }))
+
+ const segmentTokens = response.usage ? buildAnthropicSegmentTokens(response.usage) : undefined
+
+ let cost: { input: number; output: number; total: number } | undefined
+ if (
+ extras?.model &&
+ segmentTokens &&
+ typeof segmentTokens.input === 'number' &&
+ typeof segmentTokens.output === 'number'
+ ) {
+ const useCached = (segmentTokens.cacheRead ?? 0) > 0
+ const full = calculateCost(extras.model, segmentTokens.input, segmentTokens.output, useCached)
+ cost = { input: full.input, output: full.output, total: full.total }
+ }
+
+ enrichLastModelSegment(timeSegments, {
+ assistantContent: textContent || undefined,
+ thinkingContent: thinkingContent || undefined,
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+ finishReason: response.stop_reason ?? undefined,
+ tokens: segmentTokens,
+ cost,
+ provider: 'anthropic',
+ ttft: extras?.ttft,
+ errorType: extras?.errorType,
+ errorMessage: extras?.errorMessage,
+ })
+}
+
+/**
+ * Builds a segment token breakdown from Anthropic usage data, surfacing prompt
+ * cache reads/writes separately and producing a corrected `total` that includes
+ * cache_creation tokens (which Anthropic bills as input tokens but omits from
+ * `input_tokens`).
+ */
+function buildAnthropicSegmentTokens(usage: Anthropic.Messages.Message['usage']): BlockTokens {
+ const input = usage.input_tokens ?? 0
+ const output = usage.output_tokens ?? 0
+ const cacheRead = usage.cache_read_input_tokens ?? 0
+ const cacheWrite = usage.cache_creation_input_tokens ?? 0
+ return {
+ input,
+ output,
+ total: input + output + cacheRead + cacheWrite,
+ ...(cacheRead > 0 && { cacheRead }),
+ ...(cacheWrite > 0 && { cacheWrite }),
+ }
+}
diff --git a/apps/sim/providers/azure-openai/index.ts b/apps/sim/providers/azure-openai/index.ts
index d60354c77af..a5c9fcd633f 100644
--- a/apps/sim/providers/azure-openai/index.ts
+++ b/apps/sim/providers/azure-openai/index.ts
@@ -25,6 +25,7 @@ import {
} from '@/providers/azure-openai/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import { executeResponsesProviderRequest } from '@/providers/openai/core'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
FunctionCallResponse,
ProviderConfig,
@@ -223,7 +224,7 @@ async function executeChatCompletionsRequest(
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -272,13 +273,20 @@ async function executeChatCompletionsRequest(
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
},
]
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'azure_openai' }
+ )
+
const firstCheckResult = checkForForcedToolUsage(
currentResponse,
originalToolChoice ?? 'auto',
@@ -450,12 +458,19 @@ async function executeChatCompletionsRequest(
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
})
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'azure_openai' }
+ )
+
modelTime += thisModelTime
if (currentResponse.choices[0]?.message?.content) {
diff --git a/apps/sim/providers/bedrock/index.ts b/apps/sim/providers/bedrock/index.ts
index f054d781999..31c8d14cfc6 100644
--- a/apps/sim/providers/bedrock/index.ts
+++ b/apps/sim/providers/bedrock/index.ts
@@ -5,6 +5,7 @@ import {
type ContentBlock,
type ConversationRole,
ConverseCommand,
+ type ConverseResponse,
ConverseStreamCommand,
type SystemContentBlock,
type Tool,
@@ -14,7 +15,7 @@ import {
} from '@aws-sdk/client-bedrock-runtime'
import { createLogger } from '@sim/logger'
import { toError } from '@sim/utils/errors'
-import type { StreamingExecution } from '@/executor/types'
+import type { IterationToolCall, StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
@@ -23,6 +24,7 @@ import {
getBedrockInferenceProfileId,
} from '@/providers/bedrock/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegment } from '@/providers/trace-enrichment'
import type {
FunctionCallResponse,
ProviderConfig,
@@ -41,6 +43,62 @@ import { executeTool } from '@/tools'
const logger = createLogger('BedrockProvider')
+function enrichLastModelSegmentFromBedrockResponse(
+ timeSegments: TimeSegment[],
+ response: ConverseResponse,
+ extras: { model: string }
+): void {
+ const blocks: ContentBlock[] = response.output?.message?.content ?? []
+
+ const assistantText = blocks
+ .filter((b): b is ContentBlock & { text: string } => 'text' in b && typeof b.text === 'string')
+ .map((b) => b.text)
+ .join('\n')
+ const assistantContent = assistantText.length > 0 ? assistantText : undefined
+
+ const toolCalls: IterationToolCall[] = blocks
+ .filter((b): b is ContentBlock & { toolUse: ToolUseBlock } => 'toolUse' in b && !!b.toolUse)
+ .map((b) => {
+ const input = b.toolUse.input
+ return {
+ id: b.toolUse.toolUseId ?? '',
+ name: b.toolUse.name ?? '',
+ arguments:
+ input && typeof input === 'object' && !Array.isArray(input)
+ ? (input as Record)
+ : {},
+ }
+ })
+
+ const inputTokens = response.usage?.inputTokens
+ const outputTokens = response.usage?.outputTokens
+
+ let cost: { input: number; output: number; total: number } | undefined
+ if (typeof inputTokens === 'number' && typeof outputTokens === 'number') {
+ const full = calculateCost(extras.model, inputTokens, outputTokens)
+ cost = { input: full.input, output: full.output, total: full.total }
+ }
+
+ enrichLastModelSegment(timeSegments, {
+ assistantContent,
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+ finishReason: response.stopReason ?? undefined,
+ tokens:
+ inputTokens !== undefined || outputTokens !== undefined
+ ? {
+ input: inputTokens,
+ output: outputTokens,
+ total:
+ typeof inputTokens === 'number' && typeof outputTokens === 'number'
+ ? inputTokens + outputTokens
+ : undefined,
+ }
+ : undefined,
+ cost,
+ provider: 'aws.bedrock',
+ })
+}
+
export const bedrockProvider: ProviderConfig = {
id: 'bedrock',
name: 'AWS Bedrock',
@@ -345,7 +403,7 @@ export const bedrockProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -444,13 +502,17 @@ export const bedrockProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
},
]
+ enrichLastModelSegmentFromBedrockResponse(timeSegments, currentResponse, {
+ model: request.model,
+ })
+
const initialToolUseContentBlocks = (currentResponse.output?.message?.content || []).filter(
(block): block is ContentBlock & { toolUse: ToolUseBlock } => 'toolUse' in block
)
@@ -668,12 +730,16 @@ export const bedrockProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
})
+ enrichLastModelSegmentFromBedrockResponse(timeSegments, currentResponse, {
+ model: request.model,
+ })
+
modelTime += thisModelTime
if (currentResponse.usage) {
@@ -725,6 +791,10 @@ export const bedrockProvider: ProviderConfig = {
duration: structuredOutputEndTime - structuredOutputStartTime,
})
+ enrichLastModelSegmentFromBedrockResponse(timeSegments, structuredResponse, {
+ model: request.model,
+ })
+
modelTime += structuredOutputEndTime - structuredOutputStartTime
const structuredOutputCall = structuredResponse.output?.message?.content?.find(
diff --git a/apps/sim/providers/cerebras/index.ts b/apps/sim/providers/cerebras/index.ts
index 2bdfcdc1722..fe6f0bba76c 100644
--- a/apps/sim/providers/cerebras/index.ts
+++ b/apps/sim/providers/cerebras/index.ts
@@ -6,6 +6,7 @@ import { MAX_TOOL_ITERATIONS } from '@/providers'
import type { CerebrasResponse } from '@/providers/cerebras/types'
import { createReadableStreamFromCerebrasStream } from '@/providers/cerebras/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
ProviderConfig,
ProviderRequest,
@@ -161,7 +162,7 @@ export const cerebrasProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -206,7 +207,7 @@ export const cerebrasProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -219,6 +220,13 @@ export const cerebrasProvider: ProviderConfig = {
while (iterationCount < MAX_TOOL_ITERATIONS) {
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'cerebras' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
if (currentResponse.choices[0]?.message?.content) {
content = currentResponse.choices[0].message.content
@@ -313,6 +321,7 @@ export const cerebrasProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
if (result.success && result.output) {
@@ -382,7 +391,7 @@ export const cerebrasProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: 'Final response',
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -399,6 +408,13 @@ export const cerebrasProvider: ProviderConfig = {
tokens.total += finalResponse.usage.total_tokens || 0
}
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ finalResponse,
+ finalResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'cerebras' }
+ )
+
break
}
@@ -419,7 +435,7 @@ export const cerebrasProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -435,6 +451,15 @@ export const cerebrasProvider: ProviderConfig = {
iterationCount++
}
}
+
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'cerebras' }
+ )
+ }
} catch (error) {
logger.error('Error in Cerebras tool processing:', { error })
}
diff --git a/apps/sim/providers/deepseek/index.ts b/apps/sim/providers/deepseek/index.ts
index bd4abf1ace4..6f5c0612e3d 100644
--- a/apps/sim/providers/deepseek/index.ts
+++ b/apps/sim/providers/deepseek/index.ts
@@ -5,6 +5,7 @@ import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { createReadableStreamFromDeepseekStream } from '@/providers/deepseek/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
ProviderConfig,
ProviderRequest,
@@ -161,7 +162,7 @@ export const deepseekProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -217,7 +218,7 @@ export const deepseekProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -248,6 +249,14 @@ export const deepseekProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'deepseek' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -324,6 +333,7 @@ export const deepseekProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -410,7 +420,7 @@ export const deepseekProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -432,6 +442,15 @@ export const deepseekProvider: ProviderConfig = {
iterationCount++
}
+
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'deepseek' }
+ )
+ }
} catch (error) {
logger.error('Error in Deepseek request:', { error })
}
diff --git a/apps/sim/providers/fireworks/index.ts b/apps/sim/providers/fireworks/index.ts
index 08d24584f96..6aa336ec7b9 100644
--- a/apps/sim/providers/fireworks/index.ts
+++ b/apps/sim/providers/fireworks/index.ts
@@ -10,6 +10,7 @@ import {
supportsNativeStructuredOutputs,
} from '@/providers/fireworks/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
FunctionCallResponse,
Message,
@@ -209,7 +210,7 @@ export const fireworksProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -257,7 +258,7 @@ export const fireworksProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -279,6 +280,14 @@ export const fireworksProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'fireworks' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -358,6 +367,7 @@ export const fireworksProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -423,7 +433,7 @@ export const fireworksProvider: ProviderConfig = {
const thisModelTime = nextModelEndTime - nextModelStartTime
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -440,6 +450,15 @@ export const fireworksProvider: ProviderConfig = {
iterationCount++
}
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'fireworks' }
+ )
+ }
+
if (request.stream) {
const accumulatedCost = calculateCost(requestedModel, tokens.input, tokens.output)
@@ -572,6 +591,13 @@ export const fireworksProvider: ProviderConfig = {
tokens.output += finalResponse.usage.completion_tokens || 0
tokens.total += finalResponse.usage.total_tokens || 0
}
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ finalResponse,
+ finalResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'fireworks' }
+ )
}
const providerEndTime = Date.now()
@@ -622,3 +648,8 @@ export const fireworksProvider: ProviderConfig = {
}
},
}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, tool calls, finish reason, token usage.
+ */
diff --git a/apps/sim/providers/gemini/core.ts b/apps/sim/providers/gemini/core.ts
index 786975eabcc..e22baeda8e7 100644
--- a/apps/sim/providers/gemini/core.ts
+++ b/apps/sim/providers/gemini/core.ts
@@ -13,7 +13,7 @@ import {
} from '@google/genai'
import { createLogger } from '@sim/logger'
import { toError } from '@sim/utils/errors'
-import type { StreamingExecution } from '@/executor/types'
+import type { IterationToolCall, StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
@@ -26,7 +26,13 @@ import {
extractTextContent,
mapToThinkingLevel,
} from '@/providers/google/utils'
-import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types'
+import { enrichLastModelSegment } from '@/providers/trace-enrichment'
+import type {
+ FunctionCallResponse,
+ ProviderRequest,
+ ProviderResponse,
+ TimeSegment,
+} from '@/providers/types'
import {
calculateCost,
isDeepResearchModel,
@@ -71,7 +77,7 @@ function createInitialState(
timeSegments: [
{
type: 'model',
- name: 'Initial response',
+ name: model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -218,6 +224,7 @@ async function executeToolCallsBatch(
startTime: r.startTime,
endTime: r.endTime,
duration: r.duration,
+ toolCallId: r.part.functionCall?.id ?? undefined,
})
totalToolsTime += r.duration
@@ -279,7 +286,7 @@ function updateStateWithResponse(
...state.timeSegments,
{
type: 'model',
- name: `Model response (iteration ${state.iterationCount + 1})`,
+ name: model,
startTime,
endTime,
duration,
@@ -1074,6 +1081,9 @@ export async function executeGeminiRequest(
model,
toolConfig
)
+ enrichLastModelSegmentFromGeminiResponse(state.timeSegments, response, {
+ model,
+ })
const forcedTools = preparedTools?.forcedTools ?? []
let currentResponse = response
@@ -1122,6 +1132,9 @@ export async function executeGeminiRequest(
config: nextConfig,
})
state = updateStateWithResponse(state, checkResponse, model, Date.now() - 100, Date.now())
+ enrichLastModelSegmentFromGeminiResponse(state.timeSegments, checkResponse, {
+ model,
+ })
if (checkResponse.functionCalls?.length) {
currentResponse = checkResponse
@@ -1207,6 +1220,9 @@ export async function executeGeminiRequest(
config: nextConfig,
})
state = updateStateWithResponse(state, nextResponse, model, nextModelStartTime, Date.now())
+ enrichLastModelSegmentFromGeminiResponse(state.timeSegments, nextResponse, {
+ model,
+ })
currentResponse = nextResponse
}
@@ -1257,3 +1273,80 @@ export async function executeGeminiRequest(
throw enhancedError
}
}
+
+/**
+ * Enriches the last model segment with per-iteration content extracted from a
+ * Gemini response: assistant text, thinking (thought) parts, function calls,
+ * finish reason, and token usage.
+ */
+function enrichLastModelSegmentFromGeminiResponse(
+ timeSegments: TimeSegment[],
+ response: GenerateContentResponse,
+ extras?: {
+ model?: string
+ ttft?: number
+ errorType?: string
+ errorMessage?: string
+ }
+): void {
+ const candidate = response.candidates?.[0]
+ const assistantText = extractTextContent(candidate)
+
+ const thinkingParts =
+ candidate?.content?.parts?.filter((p): p is Part & { text: string } =>
+ Boolean(p.text && p.thought === true)
+ ) ?? []
+ const thinkingContent = thinkingParts.map((p) => p.text).join('\n\n')
+
+ const functionCallParts = extractAllFunctionCallParts(candidate)
+ const toolCalls: IterationToolCall[] = functionCallParts
+ .filter((p): p is Part & { functionCall: NonNullable<Part['functionCall']> } =>
+ Boolean(p.functionCall)
+ )
+ .map((p) => ({
+ id: p.functionCall.id ?? '',
+ name: p.functionCall.name ?? '',
+ arguments: (p.functionCall.args ?? {}) as Record<string, unknown>,
+ }))
+
+ const usage = convertUsageMetadata(response.usageMetadata)
+ const cachedContentTokens = response.usageMetadata?.cachedContentTokenCount ?? 0
+ const thoughtsTokens = response.usageMetadata?.thoughtsTokenCount ?? 0
+
+ let cost: { input: number; output: number; total: number } | undefined
+ if (
+ extras?.model &&
+ response.usageMetadata &&
+ typeof usage.promptTokenCount === 'number' &&
+ typeof usage.candidatesTokenCount === 'number'
+ ) {
+ const full = calculateCost(
+ extras.model,
+ usage.promptTokenCount,
+ usage.candidatesTokenCount,
+ cachedContentTokens > 0
+ )
+ cost = { input: full.input, output: full.output, total: full.total }
+ }
+
+ enrichLastModelSegment(timeSegments, {
+ assistantContent: assistantText || undefined,
+ thinkingContent: thinkingContent || undefined,
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+ finishReason: candidate?.finishReason ?? undefined,
+ tokens: response.usageMetadata
+ ? {
+ input: usage.promptTokenCount,
+ output: usage.candidatesTokenCount,
+ total: usage.totalTokenCount,
+ ...(cachedContentTokens > 0 && { cacheRead: cachedContentTokens }),
+ ...(thoughtsTokens > 0 && { reasoning: thoughtsTokens }),
+ }
+ : undefined,
+ cost,
+ provider: 'google',
+ ttft: extras?.ttft,
+ errorType: extras?.errorType,
+ errorMessage: extras?.errorMessage,
+ })
+}
diff --git a/apps/sim/providers/groq/index.ts b/apps/sim/providers/groq/index.ts
index fba8984e86b..192e1412d94 100644
--- a/apps/sim/providers/groq/index.ts
+++ b/apps/sim/providers/groq/index.ts
@@ -5,6 +5,7 @@ import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { createReadableStreamFromGroqStream } from '@/providers/groq/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
ProviderConfig,
ProviderRequest,
@@ -162,7 +163,7 @@ export const groqProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -212,7 +213,7 @@ export const groqProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -226,6 +227,14 @@ export const groqProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'groq' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -302,6 +311,7 @@ export const groqProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -373,7 +383,7 @@ export const groqProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -393,6 +403,15 @@ export const groqProvider: ProviderConfig = {
iterationCount++
}
+
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'groq' }
+ )
+ }
} catch (error) {
logger.error('Error in Groq request:', { error })
}
diff --git a/apps/sim/providers/mistral/index.ts b/apps/sim/providers/mistral/index.ts
index 32e24c1f329..ffe1ecad930 100644
--- a/apps/sim/providers/mistral/index.ts
+++ b/apps/sim/providers/mistral/index.ts
@@ -6,6 +6,7 @@ import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { createReadableStreamFromMistralStream } from '@/providers/mistral/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
ProviderConfig,
ProviderRequest,
@@ -200,7 +201,7 @@ export const mistralProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -272,7 +273,7 @@ export const mistralProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -287,6 +288,14 @@ export const mistralProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'mistral' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -365,6 +374,7 @@ export const mistralProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -433,7 +443,7 @@ export const mistralProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -454,6 +464,15 @@ export const mistralProvider: ProviderConfig = {
iterationCount++
}
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'mistral' }
+ )
+ }
+
if (request.stream) {
logger.info('Using streaming for final response after tool processing')
@@ -576,3 +595,8 @@ export const mistralProvider: ProviderConfig = {
}
},
}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, tool calls, finish reason, token usage.
+ */
diff --git a/apps/sim/providers/ollama/index.ts b/apps/sim/providers/ollama/index.ts
index 45ea3802b9c..045dd1d462a 100644
--- a/apps/sim/providers/ollama/index.ts
+++ b/apps/sim/providers/ollama/index.ts
@@ -7,6 +7,7 @@ import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import type { ModelsObject } from '@/providers/ollama/types'
import { createReadableStreamFromOllamaStream } from '@/providers/ollama/utils'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
ProviderConfig,
ProviderRequest,
@@ -230,7 +231,7 @@ export const ollamaProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -282,7 +283,7 @@ export const ollamaProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -295,6 +296,14 @@ export const ollamaProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'ollama' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -375,6 +384,7 @@ export const ollamaProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -426,7 +436,7 @@ export const ollamaProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -449,6 +459,15 @@ export const ollamaProvider: ProviderConfig = {
iterationCount++
}
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'ollama' }
+ )
+ }
+
if (request.stream) {
logger.info('Using streaming for final response after tool processing')
@@ -579,3 +598,8 @@ export const ollamaProvider: ProviderConfig = {
}
},
}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, tool calls, finish reason, token usage.
+ */
diff --git a/apps/sim/providers/openai/core.ts b/apps/sim/providers/openai/core.ts
index 6a0104e0651..1f025269235 100644
--- a/apps/sim/providers/openai/core.ts
+++ b/apps/sim/providers/openai/core.ts
@@ -1,8 +1,9 @@
import type { Logger } from '@sim/logger'
import { toError } from '@sim/utils/errors'
import type OpenAI from 'openai'
-import type { StreamingExecution } from '@/executor/types'
+import type { IterationToolCall, StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
+import { enrichLastModelSegment, parseToolCallArguments } from '@/providers/trace-enrichment'
import type { Message, ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types'
import { ProviderError } from '@/providers/types'
import {
@@ -18,6 +19,7 @@ import {
convertResponseOutputToInputItems,
convertToolsToResponses,
createReadableStreamFromResponses,
+ extractResponseReasoning,
extractResponseText,
extractResponseToolCalls,
parseResponsesUsage,
@@ -347,7 +349,7 @@ export async function executeResponsesProviderRequest(
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -416,7 +418,7 @@ export async function executeResponsesProviderRequest(
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -435,6 +437,15 @@ export async function executeResponsesProviderRequest(
}
const toolCallsInResponse = extractResponseToolCalls(currentResponse.output)
+
+ enrichLastModelSegmentFromOpenAIResponse(
+ timeSegments,
+ currentResponse,
+ responseText,
+ toolCallsInResponse,
+ { model: request.model }
+ )
+
if (!toolCallsInResponse.length) {
break
}
@@ -511,6 +522,7 @@ export async function executeResponsesProviderRequest(
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: Record<string, unknown>
@@ -586,7 +598,7 @@ export async function executeResponsesProviderRequest(
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -604,6 +616,18 @@ export async function executeResponsesProviderRequest(
iterationCount++
}
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ const trailingText = extractResponseText(currentResponse.output)
+ const trailingToolCalls = extractResponseToolCalls(currentResponse.output)
+ enrichLastModelSegmentFromOpenAIResponse(
+ timeSegments,
+ currentResponse,
+ trailingText,
+ trailingToolCalls,
+ { model: request.model }
+ )
+ }
+
// For Azure with deferred format: make a final call with the response format applied
// This happens whenever we have a deferred format, even if no tools were called
// (the initial call was made without the format, so we need to apply it now)
@@ -685,6 +709,14 @@ export async function executeResponsesProviderRequest(
content = formattedText
}
+ enrichLastModelSegmentFromOpenAIResponse(
+ timeSegments,
+ currentResponse,
+ formattedText,
+ extractResponseToolCalls(currentResponse.output),
+ { model: request.model }
+ )
+
appliedDeferredFormat = true
}
@@ -821,3 +853,82 @@ export async function executeResponsesProviderRequest(
})
}
}
+
+/**
+ * Determines a finish reason for an OpenAI Responses API response.
+ * Maps to conventional values: 'tool_calls' | 'length' | 'stop'.
+ */
+function deriveOpenAIFinishReason(
+ response: OpenAI.Responses.Response,
+ toolCalls: ResponsesToolCall[]
+): string | undefined {
+ const incompleteReason = response.incomplete_details?.reason
+ if (incompleteReason === 'max_output_tokens') return 'length'
+ if (incompleteReason === 'content_filter') return 'content_filter'
+ if (toolCalls.length > 0) return 'tool_calls'
+ if (incompleteReason) return incompleteReason
+ if (response.status === 'failed') return 'error'
+ if (response.status === 'incomplete') return 'length'
+ if (response.status && response.status !== 'completed') return response.status
+ return 'stop'
+}
+
+/**
+ * Enriches the last model segment with per-iteration content extracted from an
+ * OpenAI Responses API response: assistant text, tool calls, finish reason,
+ * and token usage for the iteration.
+ */
+function enrichLastModelSegmentFromOpenAIResponse(
+ timeSegments: TimeSegment[],
+ response: OpenAI.Responses.Response,
+ assistantText: string,
+ toolCallsInResponse: ResponsesToolCall[],
+ extras?: {
+ model?: string
+ ttft?: number
+ errorType?: string
+ errorMessage?: string
+ }
+): void {
+ const toolCalls: IterationToolCall[] = toolCallsInResponse.map((tc) => ({
+ id: tc.id,
+ name: tc.name,
+ arguments:
+ typeof tc.arguments === 'string' ? parseToolCallArguments(tc.arguments) : tc.arguments,
+ }))
+
+ const usage = parseResponsesUsage(response.usage)
+ const thinkingContent = extractResponseReasoning(response.output)
+
+ let cost: { input: number; output: number; total: number } | undefined
+ if (extras?.model && usage) {
+ const full = calculateCost(
+ extras.model,
+ usage.promptTokens,
+ usage.completionTokens,
+ usage.cachedTokens > 0
+ )
+ cost = { input: full.input, output: full.output, total: full.total }
+ }
+
+ enrichLastModelSegment(timeSegments, {
+ assistantContent: assistantText || undefined,
+ thinkingContent: thinkingContent || undefined,
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+ finishReason: deriveOpenAIFinishReason(response, toolCallsInResponse),
+ tokens: usage
+ ? {
+ input: usage.promptTokens,
+ output: usage.completionTokens,
+ total: usage.totalTokens,
+ ...(usage.cachedTokens > 0 && { cacheRead: usage.cachedTokens }),
+ ...(usage.reasoningTokens > 0 && { reasoning: usage.reasoningTokens }),
+ }
+ : undefined,
+ cost,
+ provider: 'openai',
+ ttft: extras?.ttft,
+ errorType: extras?.errorType,
+ errorMessage: extras?.errorMessage,
+ })
+}
diff --git a/apps/sim/providers/openai/utils.ts b/apps/sim/providers/openai/utils.ts
index f1575473ada..88efec06e2e 100644
--- a/apps/sim/providers/openai/utils.ts
+++ b/apps/sim/providers/openai/utils.ts
@@ -199,6 +199,29 @@ export function extractResponseText(output: OpenAI.Responses.ResponseOutputItem[
return textParts.join('')
}
+/**
+ * Extracts reasoning summary text from Responses API output items. Reasoning
+ * items (emitted by o1/o3/gpt-5) carry a `summary[]` of `{ type, text }` entries
+ * — we join the text for trace display. The raw `encrypted_content` is left
+ * alone; it's opaque plumbing for round-tripping across turns.
+ */
+export function extractResponseReasoning(output: OpenAI.Responses.ResponseOutputItem[]): string {
+ if (!Array.isArray(output)) return ''
+
+ const parts: string[] = []
+ for (const item of output) {
+ if (!item || item.type !== 'reasoning') continue
+ const summary = (item as unknown as { summary?: Array<{ text?: string | null } | null> })
+ .summary
+ if (!Array.isArray(summary)) continue
+ for (const entry of summary) {
+ const text = entry?.text
+ if (typeof text === 'string' && text.length > 0) parts.push(text)
+ }
+ }
+ return parts.join('\n\n')
+}
+
/**
* Converts Responses API output items into input items for subsequent calls.
*/
diff --git a/apps/sim/providers/openrouter/index.ts b/apps/sim/providers/openrouter/index.ts
index 89ae932c32d..87ff07fcfef 100644
--- a/apps/sim/providers/openrouter/index.ts
+++ b/apps/sim/providers/openrouter/index.ts
@@ -10,6 +10,7 @@ import {
createReadableStreamFromOpenAIStream,
supportsNativeStructuredOutputs,
} from '@/providers/openrouter/utils'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
FunctionCallResponse,
Message,
@@ -210,7 +211,7 @@ export const openRouterProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -258,7 +259,7 @@ export const openRouterProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -280,6 +281,14 @@ export const openRouterProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'openrouter' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -359,6 +368,7 @@ export const openRouterProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -424,7 +434,7 @@ export const openRouterProvider: ProviderConfig = {
const thisModelTime = nextModelEndTime - nextModelStartTime
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -441,6 +451,15 @@ export const openRouterProvider: ProviderConfig = {
iterationCount++
}
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'openrouter' }
+ )
+ }
+
if (request.stream) {
const accumulatedCost = calculateCost(requestedModel, tokens.input, tokens.output)
@@ -573,6 +592,13 @@ export const openRouterProvider: ProviderConfig = {
tokens.output += finalResponse.usage.completion_tokens || 0
tokens.total += finalResponse.usage.total_tokens || 0
}
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ finalResponse,
+ finalResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'openrouter' }
+ )
}
const providerEndTime = Date.now()
@@ -623,3 +649,8 @@ export const openRouterProvider: ProviderConfig = {
}
},
}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, tool calls, finish reason, token usage.
+ */
diff --git a/apps/sim/providers/trace-enrichment.ts b/apps/sim/providers/trace-enrichment.ts
new file mode 100644
index 00000000000..0d3c3232b28
--- /dev/null
+++ b/apps/sim/providers/trace-enrichment.ts
@@ -0,0 +1,221 @@
+import type { BlockTokens, IterationToolCall, ProviderTimingSegment } from '@/executor/types'
+import { calculateCost } from '@/providers/utils'
+
+/**
+ * Minimal structural shape shared by OpenAI Chat Completions and every
+ * OpenAI-compatible SDK (Groq, Cerebras, DeepSeek, xAI, Mistral, Ollama,
+ * OpenRouter, vLLM, Fireworks). Captures only the fields the trace enrichment
+ * helper reads, so providers can pass their own SDK's response type without
+ * a cast.
+ */
+interface ChatCompletionLike {
+ choices: Array<{
+ message?: {
+ content?: string | null
+ tool_calls?: Array | null
+ } | null
+ finish_reason?: string | null
+ } | null>
+ usage?: {
+ prompt_tokens?: number | null
+ completion_tokens?: number | null
+ total_tokens?: number | null
+ prompt_tokens_details?: { cached_tokens?: number | null } | null
+ completion_tokens_details?: { reasoning_tokens?: number | null } | null
+ /** DeepSeek's legacy cache shape (not nested under prompt_tokens_details). */
+ prompt_cache_hit_tokens?: number | null
+ } | null
+}
+
+interface ChatCompletionToolCallLike {
+ id: string
+ function: { name: string; arguments: string }
+}
+
+/**
+ * Content to attach to a model segment for a single provider iteration.
+ * All fields are optional — providers populate what the response carries.
+ */
+export interface ModelSegmentContent {
+ assistantContent?: string
+ thinkingContent?: string
+ toolCalls?: IterationToolCall[]
+ finishReason?: string
+ tokens?: BlockTokens
+ cost?: { input?: number; output?: number; total?: number }
+ ttft?: number
+ provider?: string
+ errorType?: string
+ errorMessage?: string
+}
+
+/**
+ * Enriches the most recent `type: 'model'` segment in `timeSegments` with
+ * content from the model response for that iteration. Writes only the fields
+ * provided; undefined fields are skipped so repeat calls can layer data.
+ *
+ * Call at the point where the response for the latest model segment is in hand
+ * — typically right after the provider call returns, before tool execution.
+ */
+export function enrichLastModelSegment(
+ timeSegments: ProviderTimingSegment[],
+ content: ModelSegmentContent
+): void {
+ for (let i = timeSegments.length - 1; i >= 0; i--) {
+ const segment = timeSegments[i]
+ if (segment.type !== 'model') continue
+
+ if (content.assistantContent !== undefined) {
+ segment.assistantContent = content.assistantContent
+ }
+ if (content.thinkingContent !== undefined) {
+ segment.thinkingContent = content.thinkingContent
+ }
+ if (content.toolCalls !== undefined) {
+ segment.toolCalls = content.toolCalls
+ }
+ if (content.finishReason !== undefined) {
+ segment.finishReason = content.finishReason
+ }
+ if (content.tokens !== undefined) {
+ segment.tokens = content.tokens
+ }
+ if (content.cost !== undefined) {
+ segment.cost = content.cost
+ }
+ if (content.ttft !== undefined) {
+ segment.ttft = content.ttft
+ }
+ if (content.provider !== undefined) {
+ segment.provider = content.provider
+ }
+ if (content.errorType !== undefined) {
+ segment.errorType = content.errorType
+ }
+ if (content.errorMessage !== undefined) {
+ segment.errorMessage = content.errorMessage
+ }
+ return
+ }
+}
+
+/**
+ * Parses a tool call's `function.arguments` JSON string into an object, or
+ * returns the raw string if it is not valid JSON.
+ */
+function parseToolCallArguments(rawArguments: string): Record<string, unknown> | string {
+ try {
+ const parsed = JSON.parse(rawArguments)
+ if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
+ return parsed as Record<string, unknown>
+ }
+ return rawArguments
+ } catch {
+ return rawArguments
+ }
+}
+
+/**
+ * Extracts reasoning/thinking content from a Chat Completions message. Covers
+ * non-OpenAI extensions emitted by reasoning-capable providers:
+ * - `reasoning_content`: DeepSeek, xAI, vLLM, Fireworks
+ * - `reasoning`: Groq, Cerebras, OpenRouter (flat)
+ * - `reasoning_details[]`: OpenRouter (structured per-block reasoning)
+ */
+function extractChatCompletionsReasoning(
+ message: NonNullable<ChatCompletionLike['choices'][number]>['message']
+): string | undefined {
+ if (!message) return undefined
+ const msg = message as unknown as {
+ reasoning_content?: string | null
+ reasoning?: string | null
+ reasoning_details?: Array<{ text?: string | null; summary?: string | null } | null> | null
+ }
+
+ if (typeof msg.reasoning_content === 'string' && msg.reasoning_content.length > 0) {
+ return msg.reasoning_content
+ }
+ if (typeof msg.reasoning === 'string' && msg.reasoning.length > 0) {
+ return msg.reasoning
+ }
+ if (Array.isArray(msg.reasoning_details)) {
+ const joined = msg.reasoning_details
+ .map((d) => d?.text ?? d?.summary ?? '')
+ .filter((s): s is string => typeof s === 'string' && s.length > 0)
+ .join('\n')
+ if (joined.length > 0) return joined
+ }
+ return undefined
+}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, thinking/reasoning, tool calls, finish
+ * reason, token usage. Shared by all OpenAI-compat providers.
+ */
+export function enrichLastModelSegmentFromChatCompletions(
+ timeSegments: ProviderTimingSegment[],
+ response: ChatCompletionLike,
+ toolCallsInResponse: ChatCompletionToolCallLike[] | undefined,
+ extras?: {
+ /** Model id used for this call — enables automatic cost calculation. */
+ model?: string
+ /** Provider system identifier (`gen_ai.system`). */
+ provider?: string
+ /** Time-to-first-token in ms (streaming path only). */
+ ttft?: number
+ /** Structured error class when the call failed. */
+ errorType?: string
+ /** Human-readable error message when the call failed. */
+ errorMessage?: string
+ /** Override the automatically derived cost. */
+ cost?: { input?: number; output?: number; total?: number }
+ }
+): void {
+ const choice = response.choices[0]
+ const assistantText = choice?.message?.content ?? ''
+ const thinkingText = extractChatCompletionsReasoning(choice?.message)
+
+ const toolCalls: IterationToolCall[] = (toolCallsInResponse ?? []).map((tc) => ({
+ id: tc.id,
+ name: tc.function.name,
+ arguments: parseToolCallArguments(tc.function.arguments),
+ }))
+
+ const usage = response.usage
+ const cacheRead =
+ usage?.prompt_tokens_details?.cached_tokens ?? usage?.prompt_cache_hit_tokens ?? 0
+ const reasoning = usage?.completion_tokens_details?.reasoning_tokens ?? 0
+
+ const promptTokens = usage?.prompt_tokens ?? undefined
+ const completionTokens = usage?.completion_tokens ?? undefined
+
+ let derivedCost = extras?.cost
+ if (!derivedCost && extras?.model && promptTokens != null && completionTokens != null) {
+ const full = calculateCost(extras.model, promptTokens, completionTokens, cacheRead > 0)
+ derivedCost = { input: full.input, output: full.output, total: full.total }
+ }
+
+ enrichLastModelSegment(timeSegments, {
+ assistantContent: assistantText || undefined,
+ thinkingContent: thinkingText,
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
+ finishReason: choice?.finish_reason ?? undefined,
+ tokens: usage
+ ? {
+ input: promptTokens,
+ output: completionTokens,
+ total: usage.total_tokens ?? undefined,
+ ...(cacheRead > 0 && { cacheRead }),
+ ...(reasoning > 0 && { reasoning }),
+ }
+ : undefined,
+ cost: derivedCost,
+ ttft: extras?.ttft,
+ provider: extras?.provider,
+ errorType: extras?.errorType,
+ errorMessage: extras?.errorMessage,
+ })
+}
+
+export { parseToolCallArguments }
diff --git a/apps/sim/providers/types.ts b/apps/sim/providers/types.ts
index 69c36079df7..468d0f8bdaa 100644
--- a/apps/sim/providers/types.ts
+++ b/apps/sim/providers/types.ts
@@ -1,4 +1,4 @@
-import type { StreamingExecution } from '@/executor/types'
+import type { ProviderTimingSegment, StreamingExecution } from '@/executor/types'
export type ProviderId =
| 'openai'
@@ -63,13 +63,12 @@ export interface FunctionCallResponse {
success?: boolean
}
-export interface TimeSegment {
- type: 'model' | 'tool'
- name: string
- startTime: number
- endTime: number
- duration: number
-}
+/**
+ * Provider-side alias for the canonical segment type. Providers push these into
+ * `providerTiming.timeSegments` during execution; the trace pipeline reads them
+ * verbatim when constructing child spans.
+ */
+export type TimeSegment = ProviderTimingSegment
export interface ProviderResponse {
content: string
diff --git a/apps/sim/providers/vllm/index.ts b/apps/sim/providers/vllm/index.ts
index 66027c43f96..db25ba45ec0 100644
--- a/apps/sim/providers/vllm/index.ts
+++ b/apps/sim/providers/vllm/index.ts
@@ -6,6 +6,7 @@ import { env } from '@/lib/core/config/env'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
Message,
ProviderConfig,
@@ -252,7 +253,7 @@ export const vllmProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -329,7 +330,7 @@ export const vllmProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -347,6 +348,14 @@ export const vllmProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'vllm' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -427,6 +436,7 @@ export const vllmProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
@@ -495,7 +505,7 @@ export const vllmProvider: ProviderConfig = {
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -519,6 +529,15 @@ export const vllmProvider: ProviderConfig = {
iterationCount++
}
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'vllm' }
+ )
+ }
+
if (request.stream) {
logger.info('Using streaming for final response after tool processing')
@@ -662,3 +681,8 @@ export const vllmProvider: ProviderConfig = {
}
},
}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, tool calls, finish reason, token usage.
+ */
diff --git a/apps/sim/providers/xai/index.ts b/apps/sim/providers/xai/index.ts
index fdbed7f5c47..309a9fd8f3b 100644
--- a/apps/sim/providers/xai/index.ts
+++ b/apps/sim/providers/xai/index.ts
@@ -5,6 +5,7 @@ import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import { enrichLastModelSegmentFromChatCompletions } from '@/providers/trace-enrichment'
import type {
Message,
ProviderConfig,
@@ -156,7 +157,7 @@ export const xAIProvider: ProviderConfig = {
timeSegments: [
{
type: 'model',
- name: 'Streaming response',
+ name: request.model,
startTime: providerStartTime,
endTime: Date.now(),
duration: Date.now() - providerStartTime,
@@ -227,7 +228,7 @@ export const xAIProvider: ProviderConfig = {
const timeSegments: TimeSegment[] = [
{
type: 'model',
- name: 'Initial response',
+ name: request.model,
startTime: initialCallTime,
endTime: initialCallTime + firstResponseTime,
duration: firstResponseTime,
@@ -251,6 +252,14 @@ export const xAIProvider: ProviderConfig = {
}
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ toolCallsInResponse,
+ { model: request.model, provider: 'xai' }
+ )
+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
@@ -331,6 +340,7 @@ export const xAIProvider: ProviderConfig = {
startTime: startTime,
endTime: endTime,
duration: duration,
+ toolCallId: toolCall.id,
})
let resultContent: any
if (result.success && result.output) {
@@ -441,7 +451,7 @@ export const xAIProvider: ProviderConfig = {
const thisModelTime = nextModelEndTime - nextModelStartTime
timeSegments.push({
type: 'model',
- name: `Model response (iteration ${iterationCount + 1})`,
+ name: request.model,
startTime: nextModelStartTime,
endTime: nextModelEndTime,
duration: thisModelTime,
@@ -461,6 +471,15 @@ export const xAIProvider: ProviderConfig = {
iterationCount++
}
+
+ if (iterationCount === MAX_TOOL_ITERATIONS) {
+ enrichLastModelSegmentFromChatCompletions(
+ timeSegments,
+ currentResponse,
+ currentResponse.choices[0]?.message?.tool_calls,
+ { model: request.model, provider: 'xai' }
+ )
+ }
} catch (error) {
logger.error('XAI Provider - Error in tool processing loop:', {
error: toError(error).message,
@@ -614,3 +633,8 @@ export const xAIProvider: ProviderConfig = {
}
},
}
+
+/**
+ * Enriches the last model segment with per-iteration content from a Chat
+ * Completions response: assistant text, tool calls, finish reason, token usage.
+ */
diff --git a/apps/sim/stores/logs/filters/types.ts b/apps/sim/stores/logs/filters/types.ts
index bdd103f16e9..3fbd85bfaee 100644
--- a/apps/sim/stores/logs/filters/types.ts
+++ b/apps/sim/stores/logs/filters/types.ts
@@ -1,3 +1,7 @@
+import type { ProviderTiming, TokenInfo, ToolCall, TraceSpan } from '@/lib/logs/types'
+
+export type { ProviderTiming, TokenInfo, ToolCall, TraceSpan }
+
export interface WorkflowData {
id: string
name: string
@@ -6,17 +10,6 @@ export interface WorkflowData {
state: any
}
-export interface ToolCall {
- name: string
- duration: number // in milliseconds
- startTime: string // ISO timestamp
- endTime: string // ISO timestamp
- status: 'success' | 'error' // Status of the tool call
- input?: Record // Input parameters (optional)
- output?: Record // Output data (optional)
- error?: string // Error message if status is 'error'
-}
-
export interface ToolCallMetadata {
toolCalls?: ToolCall[]
}
@@ -55,52 +48,6 @@ export interface CostMetadata {
}
}
-export interface TokenInfo {
- input?: number
- output?: number
- total?: number
- prompt?: number
- completion?: number
-}
-
-export interface ProviderTiming {
- duration: number
- startTime: string
- endTime: string
- segments: Array<{
- type: string
- name?: string
- startTime: string | number
- endTime: string | number
- duration: number
- }>
-}
-
-export interface TraceSpan {
- id: string
- name: string
- type: string
- duration: number // in milliseconds
- startTime: string
- endTime: string
- children?: TraceSpan[]
- toolCalls?: ToolCall[]
- status?: 'success' | 'error'
- errorHandled?: boolean
- tokens?: number | TokenInfo
- relativeStartMs?: number // Time in ms from the start of the parent span
- blockId?: string // Added to track the original block ID for relationship mapping
- input?: Record // Added to store input data for this span
- output?: Record // Added to store output data for this span
- model?: string
- cost?: {
- input?: number
- output?: number
- total?: number
- }
- providerTiming?: ProviderTiming
-}
-
export interface WorkflowLog {
id: string
workflowId: string | null
diff --git a/apps/sim/stores/logs/store.ts b/apps/sim/stores/logs/store.ts
index f9e0361e2c8..0360e7e7bb4 100644
--- a/apps/sim/stores/logs/store.ts
+++ b/apps/sim/stores/logs/store.ts
@@ -26,6 +26,11 @@ export const useLogDetailsUIStore = create()(
{
name: 'log-details-ui-state',
partialize: (state) => ({ panelWidth: state.panelWidth }),
+ onRehydrateStorage: () => (state) => {
+ if (state) {
+ state.panelWidth = clampPanelWidth(state.panelWidth)
+ }
+ },
}
)
)
diff --git a/apps/sim/stores/logs/utils.ts b/apps/sim/stores/logs/utils.ts
index 4b5d043d1d3..778320066c0 100644
--- a/apps/sim/stores/logs/utils.ts
+++ b/apps/sim/stores/logs/utils.ts
@@ -1,8 +1,8 @@
/**
* Width constraints for the log details panel.
*/
-export const MIN_LOG_DETAILS_WIDTH = 400
-export const DEFAULT_LOG_DETAILS_WIDTH = 400
+export const MIN_LOG_DETAILS_WIDTH = 600
+export const DEFAULT_LOG_DETAILS_WIDTH = 600
export const MAX_LOG_DETAILS_WIDTH_RATIO = 0.65
/**