(functi
formatFileSize={formatFileSize}
styles={styles}
/>
+ {hasUnsupportedModalities && (
+
+
+
+ {unsupportedAttachmentTypes.length > 0 && (
+ <>
+ This target does not support {unsupportedAttachmentTypes.join(', ')} attachments.
+ Remove them to send.
+ </>
+ )}
+ {unsupportedAttachmentTypes.length > 0 && unsupportedConverterOutputTypes.length > 0 && ' '}
+ {unsupportedConverterOutputTypes.length > 0 && (
+ <>
+ The selected converter produces{' '}
+ {unsupportedConverterOutputTypes.map(formatModalityLabel).join(', ')} output, which this target
+ does not support.
+ </>
+ )}
+
+
+ )}
@@ -454,10 +552,21 @@ const ChatInputArea = forwardRef(functi
appearance="primary"
icon={}
onClick={handleSend}
- disabled={disabled || (!input && attachments.length === 0)}
+ disabled={disabled || (!input && attachments.length === 0) || hasUnsupportedModalities}
title="Send message"
data-testid="send-message-btn"
/>
+ {convertedValue && (
+
+ }
+ onClick={onClearConversion}
+ data-testid="clear-conversion-btn"
+ />
+
+ )}
diff --git a/frontend/src/components/Chat/ChatWindow.test.tsx b/frontend/src/components/Chat/ChatWindow.test.tsx
index 240e44b9d3..a2666fc18c 100644
--- a/frontend/src/components/Chat/ChatWindow.test.tsx
+++ b/frontend/src/components/Chat/ChatWindow.test.tsx
@@ -2,8 +2,7 @@ import { render, screen, waitFor } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { FluentProvider, webLightTheme } from "@fluentui/react-components";
import ChatWindow from "./ChatWindow";
-import { Message, TargetInfo, TargetInstance } from "../../types";
-import type { TargetCapabilitiesInfo } from "../../types";
+import { Message, TargetCapabilitiesInfo, TargetInfo, TargetInstance } from "../../types";
import { attacksApi, convertersApi } from "../../services/api";
import * as messageMapper from "../../utils/messageMapper";
diff --git a/frontend/src/components/Chat/ChatWindow.tsx b/frontend/src/components/Chat/ChatWindow.tsx
index 8810e19182..43b3b438e0 100644
--- a/frontend/src/components/Chat/ChatWindow.tsx
+++ b/frontend/src/components/Chat/ChatWindow.tsx
@@ -591,6 +591,7 @@ export default function ChatWindow({
if (!existing) return prev
return { ...prev, text: { ...existing, convertedValue: val } }
})}
+ converterOutputDataTypes={Object.values(pieceConversions).map((c) => c.outputDataType)}
mediaConversions={Object.entries(pieceConversions)
.filter(([k]) => k !== 'text')
.map(([k, v]) => ({ pieceType: k, convertedValue: v.convertedValue }))}
diff --git a/frontend/src/components/Chat/ConverterPanel.test.tsx b/frontend/src/components/Chat/ConverterPanel.test.tsx
index 0c6bf567d9..eb2db06f34 100644
--- a/frontend/src/components/Chat/ConverterPanel.test.tsx
+++ b/frontend/src/components/Chat/ConverterPanel.test.tsx
@@ -416,6 +416,7 @@ describe('ConverterPanel use converted value', () => {
converterInstanceId: 'conv-1',
convertedValue: 'aGVsbG8=',
originalValue: 'hello',
+ outputDataType: 'text',
})
})
})
@@ -823,25 +824,4 @@ describe('ConverterPanel edge cases', () => {
// No params section should be shown
expect(screen.queryByTestId('converter-params')).not.toBeInTheDocument()
})
-
- it('handles converter with empty supported types', async () => {
- const catalogEmptyTypes = {
- items: [
- {
- converter_type: 'EmptyTypesConverter',
- supported_input_types: [],
- supported_output_types: [],
- parameters: [],
- is_llm_based: false,
- description: 'No type restrictions.',
- },
- ],
- }
- mockedConvertersApi.listConverterCatalog.mockResolvedValueOnce(catalogEmptyTypes as ConverterCatalogResponse)
-
- renderPanel({ previewText: 'hello' })
- await waitForList()
- await openComboboxAndSelect('EmptyTypesConverter')
- expect(screen.getByTestId('converter-item-EmptyTypesConverter')).toBeInTheDocument()
- })
})
diff --git a/frontend/src/components/Chat/ConverterPanel/ConverterPanel.tsx b/frontend/src/components/Chat/ConverterPanel/ConverterPanel.tsx
index 66e3d0e6ab..582ba6b362 100644
--- a/frontend/src/components/Chat/ConverterPanel/ConverterPanel.tsx
+++ b/frontend/src/components/Chat/ConverterPanel/ConverterPanel.tsx
@@ -107,7 +107,7 @@ export default function ConverterPanel({ onClose, previewText = '', attachmentDa
const groups: Record = {}
const order = ['text', 'image_path', 'audio_path', 'video_path', 'binary_path']
for (const c of filteredConverters) {
- const outType = (c.supported_output_types ?? [])[0] ?? 'text'
+ const outType = c.supported_output_types[0]
if (!groups[outType]) groups[outType] = []
groups[outType].push(c)
}
@@ -363,7 +363,7 @@ export default function ConverterPanel({ onClose, previewText = '', attachmentDa
)}
In:
- {(selectedConverter.supported_input_types ?? []).map((t) => (
+ {selectedConverter.supported_input_types.map((t) => (
{t.replace('_path', '')}
@@ -371,7 +371,7 @@ export default function ConverterPanel({ onClose, previewText = '', attachmentDa
Out:
- {(selectedConverter.supported_output_types ?? []).map((t) => (
+ {selectedConverter.supported_output_types.map((t) => (
{t.replace('_path', '')}
@@ -403,6 +403,7 @@ export default function ConverterPanel({ onClose, previewText = '', attachmentDa
previewConverterInstanceId={previewConverterInstanceId}
onPreview={handlePreview}
onUseConvertedValue={onUseConvertedValue}
+ outputDataType={selectedConverter?.supported_output_types[0] ?? 'text'}
/>
)}
diff --git a/frontend/src/components/Chat/ConverterPanel/ConverterPreview.test.tsx b/frontend/src/components/Chat/ConverterPanel/ConverterPreview.test.tsx
index dff3ac6000..74a018a101 100644
--- a/frontend/src/components/Chat/ConverterPanel/ConverterPreview.test.tsx
+++ b/frontend/src/components/Chat/ConverterPanel/ConverterPreview.test.tsx
@@ -19,6 +19,7 @@ function renderPreview(overrides: Partial = {}) {
previewConverterInstanceId: null,
onPreview: jest.fn(),
onUseConvertedValue: jest.fn(),
+ outputDataType: 'text',
}
return render(
@@ -197,6 +198,7 @@ describe('Use Converted Value button', () => {
converterInstanceId: 'conv-1',
convertedValue: 'aGVsbG8=',
originalValue: 'hello',
+ outputDataType: 'text',
})
})
@@ -216,6 +218,7 @@ describe('Use Converted Value button', () => {
converterInstanceId: 'conv-2',
convertedValue: '/path/to/output.png',
originalValue: 'data:image/png;base64,abc',
+ outputDataType: 'text',
})
})
diff --git a/frontend/src/components/Chat/ConverterPanel/ConverterPreview.tsx b/frontend/src/components/Chat/ConverterPanel/ConverterPreview.tsx
index ccf4ac1886..cd596467cb 100644
--- a/frontend/src/components/Chat/ConverterPanel/ConverterPreview.tsx
+++ b/frontend/src/components/Chat/ConverterPanel/ConverterPreview.tsx
@@ -14,9 +14,10 @@ export interface ConverterPreviewProps {
previewConverterInstanceId: string | null
onPreview: () => void
onUseConvertedValue?: (conversion: PieceConversion) => void
+ outputDataType: string
}
-export default function ConverterPreview({ activeTab, previewText, attachmentData, selectedConverterType, isPreviewing, previewError, previewOutput, previewConverterInstanceId, onPreview, onUseConvertedValue }: ConverterPreviewProps) {
+export default function ConverterPreview({ activeTab, previewText, attachmentData, selectedConverterType, isPreviewing, previewError, previewOutput, previewConverterInstanceId, onPreview, onUseConvertedValue, outputDataType }: ConverterPreviewProps) {
const styles = useConverterPanelStyles()
return (
@@ -95,6 +96,7 @@ export default function ConverterPreview({ activeTab, previewText, attachmentDat
converterInstanceId: previewConverterInstanceId,
convertedValue: previewOutput,
originalValue: activeTab === 'text' ? previewText : (attachmentData[activeTab] ?? ''),
+ outputDataType,
})}
disabled={!onUseConvertedValue}
data-testid="use-converted-btn"
diff --git a/frontend/src/components/Chat/converterTypes.ts b/frontend/src/components/Chat/converterTypes.ts
index 232a70490b..91e004c211 100644
--- a/frontend/src/components/Chat/converterTypes.ts
+++ b/frontend/src/components/Chat/converterTypes.ts
@@ -3,6 +3,7 @@ export const PIECE_TYPE_TO_DATA_TYPE: Record<string, string> = {
image: 'image_path',
audio: 'audio_path',
video: 'video_path',
+ file: 'binary_path',
}
export interface PieceConversion {
@@ -10,4 +11,5 @@ export interface PieceConversion {
convertedValue: string
originalValue: string
pieceType: string
+ outputDataType: string
}
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts
index f3db442914..a901860f43 100644
--- a/frontend/src/types/index.ts
+++ b/frontend/src/types/index.ts
@@ -73,7 +73,6 @@ export interface TargetInstance {
temperature?: number | null
top_p?: number | null
max_requests_per_minute?: number | null
- supports_multi_turn?: boolean
capabilities?: TargetCapabilitiesInfo | null
target_specific_params?: Record | null
}
diff --git a/pyrit/backend/mappers/target_mappers.py b/pyrit/backend/mappers/target_mappers.py
index e39de13ba8..caf045aa17 100644
--- a/pyrit/backend/mappers/target_mappers.py
+++ b/pyrit/backend/mappers/target_mappers.py
@@ -7,6 +7,37 @@
from pyrit.backend.models.targets import TargetCapabilitiesInfo, TargetInstance
from pyrit.prompt_target import PromptTarget
+from pyrit.prompt_target.common.target_capabilities import CapabilityName, TargetCapabilities
+
+# Capability flag names that should never be surfaced as identifier-level params:
+# they are sourced from `target_obj.capabilities` instead.
+_CAPABILITY_PARAM_NAMES = frozenset(cap.value for cap in CapabilityName)
+
+
+def _target_capabilities_to_info(capabilities: TargetCapabilities) -> TargetCapabilitiesInfo:
+ """
+ Build a TargetCapabilitiesInfo DTO from a domain TargetCapabilities object.
+
+ Modality combinations are flattened into sorted unique modality lists since
+ the frontend uses them only for per-piece modality checks.
+
+ Args:
+ capabilities: The domain TargetCapabilities object.
+
+ Returns:
+ TargetCapabilitiesInfo DTO mirroring the capability flags and flattened
+ input/output modalities.
+ """
+ return TargetCapabilitiesInfo(
+ supports_multi_turn=capabilities.supports_multi_turn,
+ supports_multi_message_pieces=capabilities.supports_multi_message_pieces,
+ supports_json_schema=capabilities.supports_json_schema,
+ supports_json_output=capabilities.supports_json_output,
+ supports_editable_history=capabilities.supports_editable_history,
+ supports_system_prompt=capabilities.supports_system_prompt,
+ supported_input_modalities=sorted({str(t) for combo in capabilities.input_modalities for t in combo}),
+ supported_output_modalities=sorted({str(t) for combo in capabilities.output_modalities for t in combo}),
+ )
def target_object_to_instance(target_registry_name: str, target_obj: PromptTarget) -> TargetInstance:
@@ -26,8 +57,10 @@ def target_object_to_instance(target_registry_name: str, target_obj: PromptTarge
identifier = target_obj.get_identifier()
params = identifier.params
- # Keys that are extracted as top-level TargetInstance fields
- # or are internal-only (target_configuration is the verbose capabilities blob).
+ # Keys that are extracted as top-level TargetInstance fields, are internal-only
+ # (e.g., target_configuration is the verbose capabilities blob), or duplicate
+ # capability flags (filtered via _CAPABILITY_PARAM_NAMES) — those are sourced
+ # solely from target_obj.capabilities and must not leak into target_specific_params.
extracted_keys = {
"endpoint",
"model_name",
@@ -35,30 +68,15 @@ def target_object_to_instance(target_registry_name: str, target_obj: PromptTarge
"temperature",
"top_p",
"max_requests_per_minute",
- "supports_multi_turn",
"target_specific_params",
"target_configuration",
- }
+ } | _CAPABILITY_PARAM_NAMES
# Collect remaining params as target_specific_params so the frontend can display them
explicit_specific = params.get("target_specific_params") or {}
extra = {k: v for k, v in params.items() if k not in extracted_keys and v is not None}
combined_specific = {**extra, **explicit_specific} or None
- caps = target_obj.capabilities
- input_modalities = sorted({modality for combo in caps.input_modalities for modality in combo})
- output_modalities = sorted({modality for combo in caps.output_modalities for modality in combo})
- capabilities = TargetCapabilitiesInfo(
- supports_multi_turn=caps.supports_multi_turn,
- supports_multi_message_pieces=caps.supports_multi_message_pieces,
- supports_json_schema=caps.supports_json_schema,
- supports_json_output=caps.supports_json_output,
- supports_editable_history=caps.supports_editable_history,
- supports_system_prompt=caps.supports_system_prompt,
- supported_input_modalities=input_modalities,
- supported_output_modalities=output_modalities,
- )
-
return TargetInstance(
target_registry_name=target_registry_name,
target_type=identifier.class_name,
@@ -68,7 +86,6 @@ def target_object_to_instance(target_registry_name: str, target_obj: PromptTarge
temperature=params.get("temperature"),
top_p=params.get("top_p"),
max_requests_per_minute=params.get("max_requests_per_minute"),
- supports_multi_turn=caps.supports_multi_turn,
- capabilities=capabilities,
+ capabilities=_target_capabilities_to_info(target_obj.capabilities),
target_specific_params=combined_specific,
)
diff --git a/pyrit/backend/models/__init__.py b/pyrit/backend/models/__init__.py
index d606d89eb0..ca21c49f7b 100644
--- a/pyrit/backend/models/__init__.py
+++ b/pyrit/backend/models/__init__.py
@@ -53,6 +53,7 @@
)
from pyrit.backend.models.targets import (
CreateTargetRequest,
+ TargetCapabilitiesInfo,
TargetInstance,
TargetListResponse,
)
@@ -100,6 +101,7 @@
"ScenarioSummary",
# Targets
"CreateTargetRequest",
+ "TargetCapabilitiesInfo",
"TargetInstance",
"TargetListResponse",
]
diff --git a/pyrit/backend/models/targets.py b/pyrit/backend/models/targets.py
index e9a28fb89f..e2eca8d911 100644
--- a/pyrit/backend/models/targets.py
+++ b/pyrit/backend/models/targets.py
@@ -19,27 +19,30 @@
class TargetCapabilitiesInfo(BaseModel):
- """Structured capability flags for a target instance."""
+ """
+ Wire-format snapshot of a target's capabilities.
+
+ Mirrors the domain ``TargetCapabilities`` dataclass for API consumers
+ (notably the GUI). Modality combinations (``frozenset[frozenset[...]]``)
+ are flattened into sorted unique modality lists since the frontend uses
+ them only for per-piece modality checks.
+ """
- supports_multi_turn: bool = Field(..., description="Whether the target supports multi-turn conversation history")
+ supports_multi_turn: bool = Field(False, description="Target natively supports multi-turn conversations")
supports_multi_message_pieces: bool = Field(
- ..., description="Whether the target supports multiple message pieces in a single request"
- )
- supports_json_schema: bool = Field(
- ..., description="Whether the target supports constraining output to a JSON schema"
- )
- supports_json_output: bool = Field(..., description="Whether the target supports JSON output format")
- supports_editable_history: bool = Field(
- ..., description="Whether the target allows the attack history to be modified"
+ False, description="Target supports multiple message pieces in a single request"
)
- supports_system_prompt: bool = Field(..., description="Whether the target supports system prompts")
+ supports_json_schema: bool = Field(False, description="Target can constrain output to a provided JSON schema")
+ supports_json_output: bool = Field(False, description="Target supports JSON output mode")
+ supports_editable_history: bool = Field(False, description="Target allows attack history to be modified")
+ supports_system_prompt: bool = Field(False, description="Target natively supports system prompts")
supported_input_modalities: list[str] = Field(
- default_factory=list,
- description="Flattened, sorted list of supported input modality data types (e.g., 'text', 'image_path')",
+ default_factory=lambda: ["text"],
+ description="Sorted unique input modality data types the target accepts (e.g., ['image_path', 'text'])",
)
supported_output_modalities: list[str] = Field(
- default_factory=list,
- description="Flattened, sorted list of supported output modality data types (e.g., 'text', 'audio_path')",
+ default_factory=lambda: ["text"],
+ description="Sorted unique output modality data types the target produces (e.g., ['audio_path', 'text'])",
)
@@ -61,8 +64,7 @@ class TargetInstance(BaseModel):
temperature: Optional[float] = Field(None, description="Temperature parameter for generation")
top_p: Optional[float] = Field(None, description="Top-p parameter for generation")
max_requests_per_minute: Optional[int] = Field(None, description="Maximum requests per minute")
- supports_multi_turn: bool = Field(True, description="Whether the target supports multi-turn conversation history")
- capabilities: Optional[TargetCapabilitiesInfo] = Field(None, description="Structured capability flags")
+ capabilities: TargetCapabilitiesInfo = Field(..., description="Structured snapshot of target capabilities")
target_specific_params: Optional[dict[str, Any]] = Field(None, description="Additional target-specific parameters")
diff --git a/tests/unit/backend/test_api_routes.py b/tests/unit/backend/test_api_routes.py
index 8a51458f6d..59bf407382 100644
--- a/tests/unit/backend/test_api_routes.py
+++ b/tests/unit/backend/test_api_routes.py
@@ -35,6 +35,7 @@
PreviewStep,
)
from pyrit.backend.models.targets import (
+ TargetCapabilitiesInfo,
TargetInstance,
TargetListResponse,
)
@@ -817,6 +818,7 @@ def test_create_target_success(self, client: TestClient) -> None:
return_value=TargetInstance(
target_registry_name="target-1",
target_type="TextTarget",
+ capabilities=TargetCapabilitiesInfo(),
)
)
mock_get_service.return_value = mock_service
@@ -866,6 +868,7 @@ def test_get_target_success(self, client: TestClient) -> None:
return_value=TargetInstance(
target_registry_name="target-1",
target_type="TextTarget",
+ capabilities=TargetCapabilitiesInfo(),
)
)
mock_get_service.return_value = mock_service
@@ -900,6 +903,7 @@ def test_list_targets_includes_target_specific_params(self, client: TestClient)
endpoint="https://api.openai.com",
model_name="o3",
temperature=1.0,
+ capabilities=TargetCapabilitiesInfo(supports_multi_turn=True),
target_specific_params={
"reasoning_effort": "high",
"reasoning_summary": "auto",
@@ -932,6 +936,7 @@ def test_get_target_includes_target_specific_params(self, client: TestClient) ->
endpoint="https://api.openai.com",
model_name="gpt-4",
temperature=0.7,
+ capabilities=TargetCapabilitiesInfo(supports_multi_turn=True),
target_specific_params={
"frequency_penalty": 0.5,
"presence_penalty": 0.3,
diff --git a/tests/unit/backend/test_mappers.py b/tests/unit/backend/test_mappers.py
index c6758bbaf0..ec1f403c9a 100644
--- a/tests/unit/backend/test_mappers.py
+++ b/tests/unit/backend/test_mappers.py
@@ -1102,7 +1102,7 @@ def test_no_get_identifier_uses_class_name(self) -> None:
assert result.model_name is None
def test_supports_multi_turn_true_when_capability_set(self) -> None:
- """Test that targets with supports_multi_turn capability have supports_multi_turn=True."""
+ """Test that targets with supports_multi_turn capability expose it via capabilities."""
target_obj = MagicMock(spec=PromptTarget)
target_obj.capabilities = TargetCapabilities(supports_multi_turn=True)
mock_identifier = ComponentIdentifier(
@@ -1117,10 +1117,10 @@ def test_supports_multi_turn_true_when_capability_set(self) -> None:
result = target_object_to_instance("t-1", target_obj)
- assert result.supports_multi_turn is True
+ assert result.capabilities.supports_multi_turn is True
def test_supports_multi_turn_false_when_capability_not_set(self) -> None:
- """Test that targets without supports_multi_turn capability have supports_multi_turn=False."""
+ """Test that targets without supports_multi_turn capability expose False via capabilities."""
target_obj = MagicMock(spec=PromptTarget)
target_obj.capabilities = TargetCapabilities(supports_multi_turn=False)
mock_identifier = ComponentIdentifier(
@@ -1131,7 +1131,67 @@ def test_supports_multi_turn_false_when_capability_not_set(self) -> None:
result = target_object_to_instance("t-1", target_obj)
- assert result.supports_multi_turn is False
+ assert result.capabilities.supports_multi_turn is False
+
+ def test_supports_multi_turn_not_extracted_from_identifier_params(self) -> None:
+ """Identifier-level supports_multi_turn must not leak into target_specific_params or override capabilities."""
+ target_obj = MagicMock(spec=PromptTarget)
+ target_obj.capabilities = TargetCapabilities(supports_multi_turn=True)
+ mock_identifier = ComponentIdentifier(
+ class_name="OpenAIChatTarget",
+ class_module="pyrit.prompt_target",
+ params={
+ "endpoint": "https://api.openai.com",
+ "model_name": "gpt-4",
+ "supports_multi_turn": False,
+ },
+ )
+ target_obj.get_identifier.return_value = mock_identifier
+
+ result = target_object_to_instance("t-1", target_obj)
+
+ assert result.capabilities.supports_multi_turn is True
+ # supports_multi_turn from identifier params should NOT bleed into target_specific_params
+ assert result.target_specific_params is None or "supports_multi_turn" not in result.target_specific_params
+
+ def test_capabilities_includes_all_capability_flags(self) -> None:
+ """Test that all boolean capability flags are exposed via the capabilities DTO."""
+ target_obj = MagicMock(spec=PromptTarget)
+ target_obj.capabilities = TargetCapabilities(
+ supports_multi_turn=True,
+ supports_multi_message_pieces=True,
+ supports_json_schema=True,
+ supports_json_output=True,
+ supports_editable_history=True,
+ supports_system_prompt=True,
+ )
+ mock_identifier = ComponentIdentifier(class_name="FullCapTarget", class_module="pyrit.prompt_target")
+ target_obj.get_identifier.return_value = mock_identifier
+
+ result = target_object_to_instance("t-1", target_obj)
+
+ assert result.capabilities.supports_multi_turn is True
+ assert result.capabilities.supports_multi_message_pieces is True
+ assert result.capabilities.supports_json_schema is True
+ assert result.capabilities.supports_json_output is True
+ assert result.capabilities.supports_editable_history is True
+ assert result.capabilities.supports_system_prompt is True
+
+ def test_capabilities_defaults_when_capabilities_minimal(self) -> None:
+ """Test that unset capability flags default to False."""
+ target_obj = MagicMock(spec=PromptTarget)
+ target_obj.capabilities = TargetCapabilities()
+ mock_identifier = ComponentIdentifier(class_name="MinimalTarget", class_module="pyrit.prompt_target")
+ target_obj.get_identifier.return_value = mock_identifier
+
+ result = target_object_to_instance("t-1", target_obj)
+
+ assert result.capabilities.supports_multi_turn is False
+ assert result.capabilities.supports_multi_message_pieces is False
+ assert result.capabilities.supports_json_schema is False
+ assert result.capabilities.supports_json_output is False
+ assert result.capabilities.supports_editable_history is False
+ assert result.capabilities.supports_system_prompt is False
def test_extra_params_in_target_specific_params(self) -> None:
"""Test that non-extracted params like reasoning_effort appear in target_specific_params."""
@@ -1257,91 +1317,100 @@ def test_chat_target_extra_params_preserved(self) -> None:
assert result.target_specific_params["seed"] == 42
assert result.target_specific_params["max_completion_tokens"] == 2048
- def test_capabilities_populated_from_target_object(self) -> None:
- """Test that all 6 capability fields are populated from target_obj.capabilities."""
+ def test_supported_input_modalities_text_only_default(self) -> None:
+ """Test that a target with default capabilities reports only 'text'."""
+ target_obj = MagicMock(spec=PromptTarget)
+ target_obj.capabilities = TargetCapabilities()
+ mock_identifier = ComponentIdentifier(class_name="TextTarget", class_module="pyrit.prompt_target")
+ target_obj.get_identifier.return_value = mock_identifier
+
+ result = target_object_to_instance("t-1", target_obj)
+
+ assert result.capabilities.supported_input_modalities == ["text"]
+
+ def test_supported_input_modalities_multimodal(self) -> None:
+ """Test that a multimodal target reports all individual input types."""
target_obj = MagicMock(spec=PromptTarget)
target_obj.capabilities = TargetCapabilities(
- supports_multi_turn=True,
- supports_multi_message_pieces=True,
- supports_json_schema=False,
- supports_json_output=True,
- supports_editable_history=False,
- supports_system_prompt=True,
+ input_modalities=frozenset(
+ {
+ frozenset({"text"}),
+ frozenset({"image_path"}),
+ frozenset({"text", "image_path"}),
+ }
+ ),
)
mock_identifier = ComponentIdentifier(
class_name="OpenAIChatTarget",
class_module="pyrit.prompt_target",
- params={"endpoint": "https://api.openai.com", "model_name": "gpt-4"},
)
target_obj.get_identifier.return_value = mock_identifier
result = target_object_to_instance("t-1", target_obj)
- assert result.capabilities is not None
- assert result.capabilities.supports_multi_turn is True
- assert result.capabilities.supports_multi_message_pieces is True
- assert result.capabilities.supports_json_schema is False
- assert result.capabilities.supports_json_output is True
- assert result.capabilities.supports_editable_history is False
- assert result.capabilities.supports_system_prompt is True
+ assert result.capabilities.supported_input_modalities == ["image_path", "text"]
- def test_capabilities_modalities_flattened_and_sorted(self) -> None:
- """Test that input/output modality combinations are flattened to a sorted list of types."""
+ def test_supported_input_modalities_audio_video(self) -> None:
+ """Test that a target supporting audio and video reports those types."""
target_obj = MagicMock(spec=PromptTarget)
target_obj.capabilities = TargetCapabilities(
input_modalities=frozenset(
{
frozenset({"text"}),
+ frozenset({"audio_path"}),
frozenset({"image_path"}),
- frozenset({"text", "image_path"}),
+ frozenset({"text", "audio_path", "image_path"}),
}
),
- output_modalities=frozenset({frozenset({"audio_path", "video_path"})}),
- )
- mock_identifier = ComponentIdentifier(
- class_name="CustomTarget",
- class_module="pyrit.prompt_target",
)
+ mock_identifier = ComponentIdentifier(class_name="RealtimeTarget", class_module="pyrit.prompt_target")
target_obj.get_identifier.return_value = mock_identifier
result = target_object_to_instance("t-1", target_obj)
- assert result.capabilities is not None
- assert result.capabilities.supported_input_modalities == ["image_path", "text"]
- assert result.capabilities.supported_output_modalities == ["audio_path", "video_path"]
+ assert result.capabilities.supported_input_modalities == ["audio_path", "image_path", "text"]
- def test_capabilities_default_modalities_are_text(self) -> None:
- """Targets that don't override modalities should default to ['text']."""
+ def test_supported_output_modalities_default_text(self) -> None:
+ """Test that a target with default capabilities reports only 'text' as output."""
target_obj = MagicMock(spec=PromptTarget)
target_obj.capabilities = TargetCapabilities()
- mock_identifier = ComponentIdentifier(
- class_name="TextTarget",
- class_module="pyrit.prompt_target",
- )
+ mock_identifier = ComponentIdentifier(class_name="TextTarget", class_module="pyrit.prompt_target")
target_obj.get_identifier.return_value = mock_identifier
result = target_object_to_instance("t-1", target_obj)
- assert result.capabilities is not None
- assert result.capabilities.supported_input_modalities == ["text"]
assert result.capabilities.supported_output_modalities == ["text"]
- def test_capabilities_matches_legacy_supports_multi_turn(self) -> None:
- """Test that legacy supports_multi_turn field matches capabilities.supports_multi_turn."""
+ def test_supported_output_modalities_image_target(self) -> None:
+ """Test that an image-output target reports 'image_path' in supported_output_modalities."""
target_obj = MagicMock(spec=PromptTarget)
- target_obj.capabilities = TargetCapabilities(supports_multi_turn=False)
- mock_identifier = ComponentIdentifier(
- class_name="TextTarget",
- class_module="pyrit.prompt_target",
+ target_obj.capabilities = TargetCapabilities(
+ output_modalities=frozenset({frozenset({"image_path"})}),
)
+ mock_identifier = ComponentIdentifier(class_name="OpenAIImageTarget", class_module="pyrit.prompt_target")
target_obj.get_identifier.return_value = mock_identifier
result = target_object_to_instance("t-1", target_obj)
- assert result.supports_multi_turn is False
- assert result.capabilities is not None
- assert result.capabilities.supports_multi_turn is False
- assert result.supports_multi_turn == result.capabilities.supports_multi_turn
+ assert result.capabilities.supported_output_modalities == ["image_path"]
+
+ def test_supported_output_modalities_video_with_audio(self) -> None:
+ """Test that a video target reports flattened sorted unique output modalities."""
+ target_obj = MagicMock(spec=PromptTarget)
+ target_obj.capabilities = TargetCapabilities(
+ output_modalities=frozenset(
+ {
+ frozenset({"audio_path", "video_path"}),
+ frozenset({"video_path"}),
+ }
+ ),
+ )
+ mock_identifier = ComponentIdentifier(class_name="SoraTarget", class_module="pyrit.prompt_target")
+ target_obj.get_identifier.return_value = mock_identifier
+
+ result = target_object_to_instance("t-1", target_obj)
+
+ assert result.capabilities.supported_output_modalities == ["audio_path", "video_path"]
def test_target_configuration_excluded_from_target_specific_params(self) -> None:
"""Test that the verbose target_configuration blob is filtered from target_specific_params."""