diff --git a/src/libs/OpenAI/Generated/JsonConverters.ListFilesResponseObject.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ListFilesOrder.g.cs similarity index 68% rename from src/libs/OpenAI/Generated/JsonConverters.ListFilesResponseObject.g.cs rename to src/libs/OpenAI/Generated/JsonConverters.ListFilesOrder.g.cs index 9ed155584..d76421fe9 100644 --- a/src/libs/OpenAI/Generated/JsonConverters.ListFilesResponseObject.g.cs +++ b/src/libs/OpenAI/Generated/JsonConverters.ListFilesOrder.g.cs @@ -3,10 +3,10 @@ namespace OpenAI.JsonConverters { /// - public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class ListFilesOrderJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::OpenAI.ListFilesResponseObject Read( + public override global::OpenAI.ListFilesOrder Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -18,7 +18,7 @@ public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.J var stringValue = reader.GetString(); if (stringValue != null) { - return global::OpenAI.ListFilesResponseObjectExtensions.ToEnum(stringValue) ?? default; + return global::OpenAI.ListFilesOrderExtensions.ToEnum(stringValue) ?? 
default; } break; @@ -26,7 +26,7 @@ public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.J case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::OpenAI.ListFilesResponseObject)numValue; + return (global::OpenAI.ListFilesOrder)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -38,12 +38,12 @@ public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.J /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.ListFilesResponseObject value, + global::OpenAI.ListFilesOrder value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); - writer.WriteStringValue(global::OpenAI.ListFilesResponseObjectExtensions.ToValueString(value)); + writer.WriteStringValue(global::OpenAI.ListFilesOrderExtensions.ToValueString(value)); } } } diff --git a/src/libs/OpenAI/Generated/JsonConverters.ListFilesResponseObjectNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ListFilesOrderNullable.g.cs similarity index 74% rename from src/libs/OpenAI/Generated/JsonConverters.ListFilesResponseObjectNullable.g.cs rename to src/libs/OpenAI/Generated/JsonConverters.ListFilesOrderNullable.g.cs index 937d0941b..4dc718753 100644 --- a/src/libs/OpenAI/Generated/JsonConverters.ListFilesResponseObjectNullable.g.cs +++ b/src/libs/OpenAI/Generated/JsonConverters.ListFilesOrderNullable.g.cs @@ -3,10 +3,10 @@ namespace OpenAI.JsonConverters { /// - public sealed class ListFilesResponseObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class ListFilesOrderNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::OpenAI.ListFilesResponseObject? Read( + public override global::OpenAI.ListFilesOrder? 
Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -18,7 +18,7 @@ public sealed class ListFilesResponseObjectNullableJsonConverter : global::Syste var stringValue = reader.GetString(); if (stringValue != null) { - return global::OpenAI.ListFilesResponseObjectExtensions.ToEnum(stringValue); + return global::OpenAI.ListFilesOrderExtensions.ToEnum(stringValue); } break; @@ -26,7 +26,7 @@ public sealed class ListFilesResponseObjectNullableJsonConverter : global::Syste case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::OpenAI.ListFilesResponseObject)numValue; + return (global::OpenAI.ListFilesOrder)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -38,7 +38,7 @@ public sealed class ListFilesResponseObjectNullableJsonConverter : global::Syste /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.ListFilesResponseObject? value, + global::OpenAI.ListFilesOrder? value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); @@ -49,7 +49,7 @@ public override void Write( } else { - writer.WriteStringValue(global::OpenAI.ListFilesResponseObjectExtensions.ToValueString(value.Value)); + writer.WriteStringValue(global::OpenAI.ListFilesOrderExtensions.ToValueString(value.Value)); } } } diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.PredictionContentType.g.cs similarity index 61% rename from src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs rename to src/libs/OpenAI/Generated/JsonConverters.PredictionContentType.g.cs index fcb4f135a..b06e7f391 100644 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs +++ b/src/libs/OpenAI/Generated/JsonConverters.PredictionContentType.g.cs @@ -3,10 +3,10 @@ namespace OpenAI.JsonConverters { /// - public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class PredictionContentTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens Read( + public override global::OpenAI.PredictionContentType Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -18,7 +18,7 @@ public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonCo var stringValue = reader.GetString(); if (stringValue != null) { - return global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToEnum(stringValue) ?? default; + return global::OpenAI.PredictionContentTypeExtensions.ToEnum(stringValue) ?? 
default; } break; @@ -26,7 +26,7 @@ public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonCo case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens)numValue; + return (global::OpenAI.PredictionContentType)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -38,12 +38,12 @@ public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonCo /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens value, + global::OpenAI.PredictionContentType value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); - writer.WriteStringValue(global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToValueString(value)); + writer.WriteStringValue(global::OpenAI.PredictionContentTypeExtensions.ToValueString(value)); } } } diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.PredictionContentTypeNullable.g.cs similarity index 64% rename from src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullable.g.cs rename to src/libs/OpenAI/Generated/JsonConverters.PredictionContentTypeNullable.g.cs index ae8792b99..39186a265 100644 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullable.g.cs +++ b/src/libs/OpenAI/Generated/JsonConverters.PredictionContentTypeNullable.g.cs @@ -3,10 +3,10 @@ namespace OpenAI.JsonConverters { /// - public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed 
class PredictionContentTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens? Read( + public override global::OpenAI.PredictionContentType? Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -18,7 +18,7 @@ public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullab var stringValue = reader.GetString(); if (stringValue != null) { - return global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToEnum(stringValue); + return global::OpenAI.PredictionContentTypeExtensions.ToEnum(stringValue); } break; @@ -26,7 +26,7 @@ public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullab case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens)numValue; + return (global::OpenAI.PredictionContentType)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -38,7 +38,7 @@ public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullab /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens? value, + global::OpenAI.PredictionContentType? value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); @@ -49,7 +49,7 @@ public override void Write( } else { - writer.WriteStringValue(global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToValueString(value.Value)); + writer.WriteStringValue(global::OpenAI.PredictionContentTypeExtensions.ToValueString(value.Value)); } } } diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs deleted file mode 100644 index a494c1aa1..000000000 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs +++ /dev/null @@ -1,49 +0,0 @@ -#nullable enable - -namespace OpenAI.JsonConverters -{ - /// - public sealed class RealtimeClientEventResponseCreateResponseMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter - { - /// - public override global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens Read( - ref global::System.Text.Json.Utf8JsonReader reader, - global::System.Type typeToConvert, - global::System.Text.Json.JsonSerializerOptions options) - { - switch (reader.TokenType) - { - case global::System.Text.Json.JsonTokenType.String: - { - var stringValue = reader.GetString(); - if (stringValue != null) - { - return global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToEnum(stringValue) ?? 
default; - } - - break; - } - case global::System.Text.Json.JsonTokenType.Number: - { - var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens)numValue; - } - default: - throw new global::System.ArgumentOutOfRangeException(nameof(reader)); - } - - return default; - } - - /// - public override void Write( - global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens value, - global::System.Text.Json.JsonSerializerOptions options) - { - writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); - - writer.WriteStringValue(global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToValueString(value)); - } - } -} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullable.g.cs deleted file mode 100644 index 8da6e0172..000000000 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullable.g.cs +++ /dev/null @@ -1,56 +0,0 @@ -#nullable enable - -namespace OpenAI.JsonConverters -{ - /// - public sealed class RealtimeClientEventResponseCreateResponseMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter - { - /// - public override global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens? 
Read( - ref global::System.Text.Json.Utf8JsonReader reader, - global::System.Type typeToConvert, - global::System.Text.Json.JsonSerializerOptions options) - { - switch (reader.TokenType) - { - case global::System.Text.Json.JsonTokenType.String: - { - var stringValue = reader.GetString(); - if (stringValue != null) - { - return global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToEnum(stringValue); - } - - break; - } - case global::System.Text.Json.JsonTokenType.Number: - { - var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens)numValue; - } - default: - throw new global::System.ArgumentOutOfRangeException(nameof(reader)); - } - - return default; - } - - /// - public override void Write( - global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens? value, - global::System.Text.Json.JsonSerializerOptions options) - { - writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); - - if (value == null) - { - writer.WriteNullValue(); - } - else - { - writer.WriteStringValue(global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToValueString(value.Value)); - } - } - } -} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs deleted file mode 100644 index eec91bb36..000000000 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs +++ /dev/null @@ -1,49 +0,0 @@ -#nullable enable - -namespace OpenAI.JsonConverters -{ - /// - public sealed class RealtimeServerEventSessionCreatedSessionMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter - { - /// - public override global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens Read( - ref global::System.Text.Json.Utf8JsonReader reader, - global::System.Type typeToConvert, - global::System.Text.Json.JsonSerializerOptions options) - { - switch (reader.TokenType) - { - case global::System.Text.Json.JsonTokenType.String: - { - var stringValue = reader.GetString(); - if (stringValue != null) - { - return global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToEnum(stringValue) ?? 
default; - } - - break; - } - case global::System.Text.Json.JsonTokenType.Number: - { - var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens)numValue; - } - default: - throw new global::System.ArgumentOutOfRangeException(nameof(reader)); - } - - return default; - } - - /// - public override void Write( - global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens value, - global::System.Text.Json.JsonSerializerOptions options) - { - writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); - - writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToValueString(value)); - } - } -} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullable.g.cs deleted file mode 100644 index 368b1f638..000000000 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullable.g.cs +++ /dev/null @@ -1,56 +0,0 @@ -#nullable enable - -namespace OpenAI.JsonConverters -{ - /// - public sealed class RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter - { - /// - public override global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens? 
Read( - ref global::System.Text.Json.Utf8JsonReader reader, - global::System.Type typeToConvert, - global::System.Text.Json.JsonSerializerOptions options) - { - switch (reader.TokenType) - { - case global::System.Text.Json.JsonTokenType.String: - { - var stringValue = reader.GetString(); - if (stringValue != null) - { - return global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToEnum(stringValue); - } - - break; - } - case global::System.Text.Json.JsonTokenType.Number: - { - var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens)numValue; - } - default: - throw new global::System.ArgumentOutOfRangeException(nameof(reader)); - } - - return default; - } - - /// - public override void Write( - global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens? value, - global::System.Text.Json.JsonSerializerOptions options) - { - writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); - - if (value == null) - { - writer.WriteNullValue(); - } - else - { - writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToValueString(value.Value)); - } - } - } -} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs deleted file mode 100644 index 3a52d9717..000000000 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs +++ /dev/null @@ -1,49 +0,0 @@ -#nullable enable - -namespace OpenAI.JsonConverters -{ - /// - public sealed class RealtimeServerEventSessionUpdatedSessionMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter - { - /// - public override global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens Read( - ref global::System.Text.Json.Utf8JsonReader reader, - global::System.Type typeToConvert, - global::System.Text.Json.JsonSerializerOptions options) - { - switch (reader.TokenType) - { - case global::System.Text.Json.JsonTokenType.String: - { - var stringValue = reader.GetString(); - if (stringValue != null) - { - return global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToEnum(stringValue) ?? 
default; - } - - break; - } - case global::System.Text.Json.JsonTokenType.Number: - { - var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens)numValue; - } - default: - throw new global::System.ArgumentOutOfRangeException(nameof(reader)); - } - - return default; - } - - /// - public override void Write( - global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens value, - global::System.Text.Json.JsonSerializerOptions options) - { - writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); - - writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToValueString(value)); - } - } -} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullable.g.cs deleted file mode 100644 index d8caf3cc9..000000000 --- a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullable.g.cs +++ /dev/null @@ -1,56 +0,0 @@ -#nullable enable - -namespace OpenAI.JsonConverters -{ - /// - public sealed class RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter - { - /// - public override global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? 
Read( - ref global::System.Text.Json.Utf8JsonReader reader, - global::System.Type typeToConvert, - global::System.Text.Json.JsonSerializerOptions options) - { - switch (reader.TokenType) - { - case global::System.Text.Json.JsonTokenType.String: - { - var stringValue = reader.GetString(); - if (stringValue != null) - { - return global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToEnum(stringValue); - } - - break; - } - case global::System.Text.Json.JsonTokenType.Number: - { - var numValue = reader.GetInt32(); - return (global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens)numValue; - } - default: - throw new global::System.ArgumentOutOfRangeException(nameof(reader)); - } - - return default; - } - - /// - public override void Write( - global::System.Text.Json.Utf8JsonWriter writer, - global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? value, - global::System.Text.Json.JsonSerializerOptions options) - { - writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); - - if (value == null) - { - writer.WriteNullValue(); - } - else - { - writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToValueString(value.Value)); - } - } - } -} diff --git a/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs b/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs index bbc1e939b..24a82fce9 100644 --- a/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs +++ b/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs @@ -301,6 +301,8 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.PredictionContentTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.PredictionContentTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioFormatJsonConverter), @@ -493,8 +495,6 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.OpenAIFilePurposeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.OpenAIFileStatusJsonConverter), typeof(global::OpenAI.JsonConverters.OpenAIFileStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListFineTuningJobCheckpointsResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.ListFineTuningJobCheckpointsResponseObjectNullableJsonConverter), 
typeof(global::OpenAI.JsonConverters.ListFineTuningJobEventsResponseObjectJsonConverter), @@ -561,14 +561,32 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.ProjectUserDeleteResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectUserUpdateRequestRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectUserUpdateRequestRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemRoleJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RealtimeConversationItemRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemContentItemTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeConversationItemContentItemTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensNullableJsonConverter), typeof(global::OpenAI.JsonConverters.UploadStatusJsonConverter), typeof(global::OpenAI.JsonConverters.UploadStatusNullableJsonConverter), typeof(global::OpenAI.JsonConverters.UploadObjectJsonConverter), @@ -591,36 +609,10 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectStatusNullableJsonConverter), 
typeof(global::OpenAI.JsonConverters.RealtimeServerEventTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeConversationObjectJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeConversationObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemRoleJsonConverter), - 
typeof(global::OpenAI.JsonConverters.RealtimeConversationItemRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemContentItemTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeConversationItemContentItemTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeContentPartTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeContentPartTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeResponseStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeResponseStatusNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeSessionUpdateTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeSessionUpdateTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeInputAudioBufferAppendTypeJsonConverter), @@ -713,6 +705,8 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.CreateBatchRequestCompletionWindowNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListAssistantsOrderJsonConverter), typeof(global::OpenAI.JsonConverters.ListAssistantsOrderNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFilesOrderJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFilesOrderNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListMessagesOrderJsonConverter), typeof(global::OpenAI.JsonConverters.ListMessagesOrderNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListRunsOrderJsonConverter), diff --git a/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs b/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs index a06752906..1aa5a6af4 100644 --- a/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs +++ b/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs @@ -1786,2510 +1786,2362 @@ public sealed 
partial class JsonSerializerContextTypes /// /// /// - public global::OpenAI.CreateChatCompletionRequestAudio? Type442 { get; set; } + public global::OpenAI.PredictionContent? Type442 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestAudioVoice? Type443 { get; set; } + public global::OpenAI.PredictionContentType? Type443 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestAudioFormat? Type444 { get; set; } + public global::OpenAI.OneOf>? Type444 { get; set; } /// /// /// - public global::OpenAI.ResponseFormat? Type445 { get; set; } + public global::System.Collections.Generic.IList? Type445 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminator? Type446 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudio? Type446 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminatorType? Type447 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudioVoice? Type447 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestServiceTier? Type448 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudioFormat? Type448 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type449 { get; set; } + public global::OpenAI.ResponseFormat? Type449 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type450 { get; set; } + public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminator? Type450 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type451 { get; set; } + public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminatorType? Type451 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestFunctionCall? Type452 { get; set; } + public global::OpenAI.CreateChatCompletionRequestServiceTier? Type452 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type453 { get; set; } + public global::OpenAI.OneOf>? Type453 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponse? Type454 { get; set; } + public global::System.Collections.Generic.IList? Type454 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type455 { get; set; } + public global::OpenAI.OneOf? Type455 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoice? Type456 { get; set; } + public global::OpenAI.CreateChatCompletionRequestFunctionCall? Type456 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoiceFinishReason? Type457 { get; set; } + public global::System.Collections.Generic.IList? Type457 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoiceLogprobs? Type458 { get; set; } + public global::OpenAI.CreateChatCompletionResponse? Type458 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type459 { get; set; } + public global::System.Collections.Generic.IList? Type459 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseServiceTier? Type460 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoice? Type460 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseObject? Type461 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoiceFinishReason? Type461 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponse? Type462 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoiceLogprobs? Type462 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type463 { get; set; } + public global::System.Collections.Generic.IList? Type463 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoice? Type464 { get; set; } + public global::OpenAI.CreateChatCompletionResponseServiceTier? 
Type464 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoiceLogprobs? Type465 { get; set; } + public global::OpenAI.CreateChatCompletionResponseObject? Type465 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoiceFinishReason? Type466 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponse? Type466 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseServiceTier? Type467 { get; set; } + public global::System.Collections.Generic.IList? Type467 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseObject? Type468 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoice? Type468 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseUsage? Type469 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoiceLogprobs? Type469 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionRequest? Type470 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoiceFinishReason? Type470 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type471 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseServiceTier? Type471 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionRequestModel? Type472 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseObject? Type472 { get; set; } /// /// /// - public global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type473 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseUsage? Type473 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type474 { get; set; } + public global::OpenAI.CreateCompletionRequest? Type474 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponse? Type475 { get; set; } + public global::OpenAI.AnyOf? 
Type475 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type476 { get; set; } + public global::OpenAI.CreateCompletionRequestModel? Type476 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoice? Type477 { get; set; } + public global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type477 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoiceFinishReason? Type478 { get; set; } + public global::System.Collections.Generic.IList>? Type478 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoiceLogprobs? Type479 { get; set; } + public global::OpenAI.CreateCompletionResponse? Type479 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type480 { get; set; } + public global::System.Collections.Generic.IList? Type480 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type481 { get; set; } + public global::OpenAI.CreateCompletionResponseChoice? Type481 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type482 { get; set; } + public global::OpenAI.CreateCompletionResponseChoiceFinishReason? Type482 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseObject? Type483 { get; set; } + public global::OpenAI.CreateCompletionResponseChoiceLogprobs? Type483 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequest? Type484 { get; set; } + public global::System.Collections.Generic.IList? Type484 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type485 { get; set; } + public global::System.Collections.Generic.IList>? Type485 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequestModel? Type486 { get; set; } + public global::System.Collections.Generic.Dictionary? Type486 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequestEncodingFormat? 
Type487 { get; set; } + public global::OpenAI.CreateCompletionResponseObject? Type487 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponse? Type488 { get; set; } + public global::OpenAI.CreateEmbeddingRequest? Type488 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type489 { get; set; } + public global::OpenAI.AnyOf? Type489 { get; set; } /// /// /// - public global::OpenAI.Embedding? Type490 { get; set; } + public global::OpenAI.CreateEmbeddingRequestModel? Type490 { get; set; } /// /// /// - public global::OpenAI.EmbeddingObject? Type491 { get; set; } + public global::OpenAI.CreateEmbeddingRequestEncodingFormat? Type491 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponseObject? Type492 { get; set; } + public global::OpenAI.CreateEmbeddingResponse? Type492 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponseUsage? Type493 { get; set; } + public global::System.Collections.Generic.IList? Type493 { get; set; } /// /// /// - public global::OpenAI.CreateFileRequest? Type494 { get; set; } + public global::OpenAI.Embedding? Type494 { get; set; } /// /// /// - public global::OpenAI.CreateFileRequestPurpose? Type495 { get; set; } + public global::OpenAI.EmbeddingObject? Type495 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequest? Type496 { get; set; } + public global::OpenAI.CreateEmbeddingResponseObject? Type496 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type497 { get; set; } + public global::OpenAI.CreateEmbeddingResponseUsage? Type497 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestModel? Type498 { get; set; } + public global::OpenAI.CreateFileRequest? Type498 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Type499 { get; set; } + public global::OpenAI.CreateFileRequestPurpose? Type499 { get; set; } /// /// /// - public global::OpenAI.OneOf? 
Type500 { get; set; } + public global::OpenAI.CreateFineTuningJobRequest? Type500 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersBatchSize? Type501 { get; set; } + public global::OpenAI.AnyOf? Type501 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type502 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestModel? Type502 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? Type503 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Type503 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type504 { get; set; } + public global::OpenAI.OneOf? Type504 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersNEpochs? Type505 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersBatchSize? Type505 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type506 { get; set; } + public global::OpenAI.OneOf? Type506 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegration? Type507 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? Type507 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegrationType? Type508 { get; set; } + public global::OpenAI.OneOf? Type508 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegrationWandb? Type509 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersNEpochs? Type509 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequest? Type510 { get; set; } + public global::System.Collections.Generic.IList? Type510 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type511 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegration? 
Type511 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestModel? Type512 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegrationType? Type512 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestSize? Type513 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegrationWandb? Type513 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestResponseFormat? Type514 { get; set; } + public global::OpenAI.CreateImageEditRequest? Type514 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequest? Type515 { get; set; } + public global::OpenAI.AnyOf? Type515 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type516 { get; set; } + public global::OpenAI.CreateImageEditRequestModel? Type516 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestModel? Type517 { get; set; } + public global::OpenAI.CreateImageEditRequestSize? Type517 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestQuality? Type518 { get; set; } + public global::OpenAI.CreateImageEditRequestResponseFormat? Type518 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestResponseFormat? Type519 { get; set; } + public global::OpenAI.CreateImageRequest? Type519 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestSize? Type520 { get; set; } + public global::OpenAI.AnyOf? Type520 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestStyle? Type521 { get; set; } + public global::OpenAI.CreateImageRequestModel? Type521 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequest? Type522 { get; set; } + public global::OpenAI.CreateImageRequestQuality? Type522 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type523 { get; set; } + public global::OpenAI.CreateImageRequestResponseFormat? Type523 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestModel? 
Type524 { get; set; } + public global::OpenAI.CreateImageRequestSize? Type524 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestResponseFormat? Type525 { get; set; } + public global::OpenAI.CreateImageRequestStyle? Type525 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestSize? Type526 { get; set; } + public global::OpenAI.CreateImageVariationRequest? Type526 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequest? Type527 { get; set; } + public global::OpenAI.AnyOf? Type527 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestRole? Type528 { get; set; } + public global::OpenAI.CreateImageVariationRequestModel? Type528 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type529 { get; set; } + public global::OpenAI.CreateImageVariationRequestResponseFormat? Type529 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type530 { get; set; } + public global::OpenAI.CreateImageVariationRequestSize? Type530 { get; set; } /// /// /// - public global::OpenAI.ContentVariant2Item? Type531 { get; set; } + public global::OpenAI.CreateMessageRequest? Type531 { get; set; } /// /// /// - public global::OpenAI.MessageRequestContentTextObject? Type532 { get; set; } + public global::OpenAI.CreateMessageRequestRole? Type532 { get; set; } /// /// /// - public global::OpenAI.MessageRequestContentTextObjectType? Type533 { get; set; } + public global::OpenAI.OneOf>? Type533 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminator? Type534 { get; set; } + public global::System.Collections.Generic.IList? Type534 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminatorType? Type535 { get; set; } + public global::OpenAI.ContentVariant2Item? Type535 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type536 { get; set; } + public global::OpenAI.MessageRequestContentTextObject? Type536 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachment? Type537 { get; set; } + public global::OpenAI.MessageRequestContentTextObjectType? Type537 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type538 { get; set; } + public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminator? Type538 { get; set; } /// /// /// - public global::OpenAI.ToolsItem3? Type539 { get; set; } + public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminatorType? Type539 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminator? Type540 { get; set; } + public global::System.Collections.Generic.IList? Type540 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminatorType? Type541 { get; set; } + public global::OpenAI.CreateMessageRequestAttachment? Type541 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequest? Type542 { get; set; } + public global::System.Collections.Generic.IList? Type542 { get; set; } /// /// /// - public global::OpenAI.OneOf, global::System.Collections.Generic.IList>? Type543 { get; set; } + public global::OpenAI.ToolsItem3? Type543 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type544 { get; set; } + public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminator? Type544 { get; set; } /// /// /// - public global::OpenAI.InputVariant3Item? Type545 { get; set; } + public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminatorType? Type545 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1? Type546 { get; set; } + public global::OpenAI.CreateModerationRequest? Type546 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1Type? 
Type547 { get; set; } + public global::OpenAI.OneOf, global::System.Collections.Generic.IList>? Type547 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type548 { get; set; } + public global::System.Collections.Generic.IList? Type548 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2? Type549 { get; set; } + public global::OpenAI.InputVariant3Item? Type549 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2Type? Type550 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1? Type550 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminator? Type551 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1Type? Type551 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type552 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type552 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type553 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2? Type553 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestModel? Type554 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2Type? Type554 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponse? Type555 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminator? Type555 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type556 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type556 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResult? Type557 { get; set; } + public global::OpenAI.AnyOf? 
Type557 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategories? Type558 { get; set; } + public global::OpenAI.CreateModerationRequestModel? Type558 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryScores? Type559 { get; set; } + public global::OpenAI.CreateModerationResponse? Type559 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypes? Type560 { get; set; } + public global::System.Collections.Generic.IList? Type560 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type561 { get; set; } + public global::OpenAI.CreateModerationResponseResult? Type561 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type562 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategories? Type562 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type563 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryScores? Type563 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Type564 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypes? Type564 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type565 { get; set; } + public global::System.Collections.Generic.IList? Type565 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type566 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type566 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type567 { get; set; } + public global::System.Collections.Generic.IList? 
Type567 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type568 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Type568 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type569 { get; set; } + public global::System.Collections.Generic.IList? Type569 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type570 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type570 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type571 { get; set; } + public global::System.Collections.Generic.IList? Type571 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type572 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type572 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type573 { get; set; } + public global::System.Collections.Generic.IList? Type573 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type574 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type574 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type575 { get; set; } + public global::System.Collections.Generic.IList? Type575 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type576 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type576 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type577 { get; set; } + public global::System.Collections.Generic.IList? Type577 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Type578 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type578 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type579 { get; set; } + public global::System.Collections.Generic.IList? Type579 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type580 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type580 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type581 { get; set; } + public global::System.Collections.Generic.IList? Type581 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Type582 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Type582 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type583 { get; set; } + public global::System.Collections.Generic.IList? Type583 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type584 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type584 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type585 { get; set; } + public global::System.Collections.Generic.IList? Type585 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type586 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? 
Type586 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequest? Type587 { get; set; } + public global::System.Collections.Generic.IList? Type587 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type588 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type588 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestModel? Type589 { get; set; } + public global::System.Collections.Generic.IList? Type589 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type590 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type590 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type591 { get; set; } + public global::OpenAI.CreateRunRequest? Type591 { get; set; } /// /// /// - public global::OpenAI.ToolsItem4? Type592 { get; set; } + public global::OpenAI.AnyOf? Type592 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestToolDiscriminator? Type593 { get; set; } + public global::OpenAI.CreateRunRequestModel? Type593 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestToolDiscriminatorType? Type594 { get; set; } + public global::System.Collections.Generic.IList? Type594 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequest? Type595 { get; set; } + public global::System.Collections.Generic.IList? Type595 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type596 { get; set; } + public global::OpenAI.ToolsItem4? Type596 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestModel? Type597 { get; set; } + public global::OpenAI.CreateRunRequestToolDiscriminator? Type597 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestVoice? Type598 { get; set; } + public global::OpenAI.CreateRunRequestToolDiscriminatorType? Type598 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestResponseFormat? 
Type599 { get; set; } + public global::OpenAI.CreateSpeechRequest? Type599 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequest? Type600 { get; set; } + public global::OpenAI.AnyOf? Type600 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequest? Type601 { get; set; } + public global::OpenAI.CreateSpeechRequestModel? Type601 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResources? Type602 { get; set; } + public global::OpenAI.CreateSpeechRequestVoice? Type602 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesCodeInterpreter? Type603 { get; set; } + public global::OpenAI.CreateSpeechRequestResponseFormat? Type603 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearch? Type604 { get; set; } + public global::OpenAI.CreateThreadAndRunRequest? Type604 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type605 { get; set; } + public global::OpenAI.CreateThreadRequest? Type605 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStore? Type606 { get; set; } + public global::OpenAI.CreateThreadRequestToolResources? Type606 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type607 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesCodeInterpreter? Type607 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type608 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearch? Type608 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type609 { get; set; } + public global::System.Collections.Generic.IList? 
Type609 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type610 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStore? Type610 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type611 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type611 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type612 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type612 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? Type613 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type613 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type614 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type614 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type615 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type615 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestModel? Type616 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type616 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type617 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? 
Type617 { get; set; } /// /// /// - public global::OpenAI.ToolsItem5? Type618 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type618 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolDiscriminator? Type619 { get; set; } + public global::OpenAI.AnyOf? Type619 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolDiscriminatorType? Type620 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestModel? Type620 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResources? Type621 { get; set; } + public global::System.Collections.Generic.IList? Type621 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type622 { get; set; } + public global::OpenAI.ToolsItem5? Type622 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch? Type623 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolDiscriminator? Type623 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequest? Type624 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolDiscriminatorType? Type624 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type625 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResources? Type625 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequestModel? Type626 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type626 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type627 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch? Type627 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequestTimestampGranularitie? Type628 { get; set; } + public global::OpenAI.CreateTranscriptionRequest? 
Type628 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionResponseJson? Type629 { get; set; } + public global::OpenAI.AnyOf? Type629 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionResponseVerboseJson? Type630 { get; set; } + public global::OpenAI.CreateTranscriptionRequestModel? Type630 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type631 { get; set; } + public global::System.Collections.Generic.IList? Type631 { get; set; } /// /// /// - public global::OpenAI.TranscriptionWord? Type632 { get; set; } + public global::OpenAI.CreateTranscriptionRequestTimestampGranularitie? Type632 { get; set; } /// /// /// - public float? Type633 { get; set; } + public global::OpenAI.CreateTranscriptionResponseJson? Type633 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type634 { get; set; } + public global::OpenAI.CreateTranscriptionResponseVerboseJson? Type634 { get; set; } /// /// /// - public global::OpenAI.TranscriptionSegment? Type635 { get; set; } + public global::System.Collections.Generic.IList? Type635 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationRequest? Type636 { get; set; } + public global::OpenAI.TranscriptionWord? Type636 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type637 { get; set; } + public float? Type637 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationRequestModel? Type638 { get; set; } + public global::System.Collections.Generic.IList? Type638 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationResponseJson? Type639 { get; set; } + public global::OpenAI.TranscriptionSegment? Type639 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationResponseVerboseJson? Type640 { get; set; } + public global::OpenAI.CreateTranslationRequest? Type640 { get; set; } /// /// /// - public global::OpenAI.CreateUploadRequest? Type641 { get; set; } + public global::OpenAI.AnyOf? 
Type641 { get; set; } /// /// /// - public global::OpenAI.CreateUploadRequestPurpose? Type642 { get; set; } + public global::OpenAI.CreateTranslationRequestModel? Type642 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreFileBatchRequest? Type643 { get; set; } + public global::OpenAI.CreateTranslationResponseJson? Type643 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreFileRequest? Type644 { get; set; } + public global::OpenAI.CreateTranslationResponseVerboseJson? Type644 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequest? Type645 { get; set; } + public global::OpenAI.CreateUploadRequest? Type645 { get; set; } /// /// /// - public global::OpenAI.VectorStoreExpirationAfter? Type646 { get; set; } + public global::OpenAI.CreateUploadRequestPurpose? Type646 { get; set; } /// /// /// - public global::OpenAI.VectorStoreExpirationAfterAnchor? Type647 { get; set; } + public global::OpenAI.CreateVectorStoreFileBatchRequest? Type647 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequestChunkingStrategy? Type648 { get; set; } + public global::OpenAI.CreateVectorStoreFileRequest? Type648 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminator? Type649 { get; set; } + public global::OpenAI.CreateVectorStoreRequest? Type649 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminatorType? Type650 { get; set; } + public global::OpenAI.VectorStoreExpirationAfter? Type650 { get; set; } /// /// /// - public global::OpenAI.DefaultProjectErrorResponse? Type651 { get; set; } + public global::OpenAI.VectorStoreExpirationAfterAnchor? Type651 { get; set; } /// /// /// - public global::OpenAI.DeleteAssistantResponse? Type652 { get; set; } + public global::OpenAI.CreateVectorStoreRequestChunkingStrategy? Type652 { get; set; } /// /// /// - public global::OpenAI.DeleteAssistantResponseObject? 
Type653 { get; set; } + public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminator? Type653 { get; set; } /// /// /// - public global::OpenAI.DeleteFileResponse? Type654 { get; set; } + public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminatorType? Type654 { get; set; } /// /// /// - public global::OpenAI.DeleteFileResponseObject? Type655 { get; set; } + public global::OpenAI.DefaultProjectErrorResponse? Type655 { get; set; } /// /// /// - public global::OpenAI.DeleteMessageResponse? Type656 { get; set; } + public global::OpenAI.DeleteAssistantResponse? Type656 { get; set; } /// /// /// - public global::OpenAI.DeleteMessageResponseObject? Type657 { get; set; } + public global::OpenAI.DeleteAssistantResponseObject? Type657 { get; set; } /// /// /// - public global::OpenAI.DeleteModelResponse? Type658 { get; set; } + public global::OpenAI.DeleteFileResponse? Type658 { get; set; } /// /// /// - public global::OpenAI.DeleteThreadResponse? Type659 { get; set; } + public global::OpenAI.DeleteFileResponseObject? Type659 { get; set; } /// /// /// - public global::OpenAI.DeleteThreadResponseObject? Type660 { get; set; } + public global::OpenAI.DeleteMessageResponse? Type660 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreFileResponse? Type661 { get; set; } + public global::OpenAI.DeleteMessageResponseObject? Type661 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreFileResponseObject? Type662 { get; set; } + public global::OpenAI.DeleteModelResponse? Type662 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreResponse? Type663 { get; set; } + public global::OpenAI.DeleteThreadResponse? Type663 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreResponseObject? Type664 { get; set; } + public global::OpenAI.DeleteThreadResponseObject? Type664 { get; set; } /// /// /// - public global::OpenAI.ErrorResponse? 
Type665 { get; set; } + public global::OpenAI.DeleteVectorStoreFileResponse? Type665 { get; set; } /// /// /// - public global::OpenAI.FineTuneChatCompletionRequestAssistantMessage? Type666 { get; set; } + public global::OpenAI.DeleteVectorStoreFileResponseObject? Type666 { get; set; } /// /// /// - public global::OpenAI.FineTuneChatCompletionRequestAssistantMessageVariant1? Type667 { get; set; } + public global::OpenAI.DeleteVectorStoreResponse? Type667 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegration? Type668 { get; set; } + public global::OpenAI.DeleteVectorStoreResponseObject? Type668 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegrationType? Type669 { get; set; } + public global::OpenAI.ErrorResponse? Type669 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegrationWandb? Type670 { get; set; } + public global::OpenAI.FineTuneChatCompletionRequestAssistantMessage? Type670 { get; set; } /// /// /// - public global::OpenAI.FineTuningJob? Type671 { get; set; } + public global::OpenAI.FineTuneChatCompletionRequestAssistantMessageVariant1? Type671 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobError? Type672 { get; set; } + public global::OpenAI.FineTuningIntegration? Type672 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobHyperparameters? Type673 { get; set; } + public global::OpenAI.FineTuningIntegrationType? Type673 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type674 { get; set; } + public global::OpenAI.FineTuningIntegrationWandb? Type674 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobHyperparametersNEpochs? Type675 { get; set; } + public global::OpenAI.FineTuningJob? Type675 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobObject? Type676 { get; set; } + public global::OpenAI.FineTuningJobError? Type676 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobStatus? 
Type677 { get; set; } + public global::OpenAI.FineTuningJobHyperparameters? Type677 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type678 { get; set; } + public global::OpenAI.OneOf? Type678 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type679 { get; set; } + public global::OpenAI.FineTuningJobHyperparametersNEpochs? Type679 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpoint? Type680 { get; set; } + public global::OpenAI.FineTuningJobObject? Type680 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpointMetrics? Type681 { get; set; } + public global::OpenAI.FineTuningJobStatus? Type681 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpointObject? Type682 { get; set; } + public global::System.Collections.Generic.IList>? Type682 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEvent? Type683 { get; set; } + public global::OpenAI.OneOf? Type683 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEventLevel? Type684 { get; set; } + public global::OpenAI.FineTuningJobCheckpoint? Type684 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEventObject? Type685 { get; set; } + public global::OpenAI.FineTuningJobCheckpointMetrics? Type685 { get; set; } /// /// /// - public global::OpenAI.FinetuneChatRequestInput? Type686 { get; set; } + public global::OpenAI.FineTuningJobCheckpointObject? Type686 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type687 { get; set; } + public global::OpenAI.FineTuningJobEvent? Type687 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type688 { get; set; } + public global::OpenAI.FineTuningJobEventLevel? Type688 { get; set; } /// /// /// - public global::OpenAI.FinetuneCompletionRequestInput? Type689 { get; set; } + public global::OpenAI.FineTuningJobEventObject? Type689 { get; set; } /// /// /// - public global::OpenAI.Image? 
Type690 { get; set; } + public global::OpenAI.FinetuneChatRequestInput? Type690 { get; set; } /// /// /// - public global::OpenAI.ImagesResponse? Type691 { get; set; } + public global::System.Collections.Generic.IList>? Type691 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type692 { get; set; } + public global::OpenAI.OneOf? Type692 { get; set; } /// /// /// - public global::OpenAI.Invite? Type693 { get; set; } + public global::OpenAI.FinetuneCompletionRequestInput? Type693 { get; set; } /// /// /// - public global::OpenAI.InviteObject? Type694 { get; set; } + public global::OpenAI.Image? Type694 { get; set; } /// /// /// - public global::OpenAI.InviteRole? Type695 { get; set; } + public global::OpenAI.ImagesResponse? Type695 { get; set; } /// /// /// - public global::OpenAI.InviteStatus? Type696 { get; set; } + public global::System.Collections.Generic.IList? Type696 { get; set; } /// /// /// - public global::OpenAI.InviteDeleteResponse? Type697 { get; set; } + public global::OpenAI.Invite? Type697 { get; set; } /// /// /// - public global::OpenAI.InviteDeleteResponseObject? Type698 { get; set; } + public global::OpenAI.InviteObject? Type698 { get; set; } /// /// /// - public global::OpenAI.InviteListResponse? Type699 { get; set; } + public global::OpenAI.InviteRole? Type699 { get; set; } /// /// /// - public global::OpenAI.InviteListResponseObject? Type700 { get; set; } + public global::OpenAI.InviteStatus? Type700 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type701 { get; set; } + public global::OpenAI.InviteDeleteResponse? Type701 { get; set; } /// /// /// - public global::OpenAI.InviteRequest? Type702 { get; set; } + public global::OpenAI.InviteDeleteResponseObject? Type702 { get; set; } /// /// /// - public global::OpenAI.InviteRequestRole? Type703 { get; set; } + public global::OpenAI.InviteListResponse? Type703 { get; set; } /// /// /// - public global::OpenAI.ListAssistantsResponse? 
Type704 { get; set; } + public global::OpenAI.InviteListResponseObject? Type704 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type705 { get; set; } + public global::System.Collections.Generic.IList? Type705 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsResponse? Type706 { get; set; } + public global::OpenAI.InviteRequest? Type706 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsResponseObject? Type707 { get; set; } + public global::OpenAI.InviteRequestRole? Type707 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type708 { get; set; } + public global::OpenAI.ListAssistantsResponse? Type708 { get; set; } /// /// /// - public global::OpenAI.ListBatchesResponse? Type709 { get; set; } + public global::System.Collections.Generic.IList? Type709 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type710 { get; set; } + public global::OpenAI.ListAuditLogsResponse? Type710 { get; set; } /// /// /// - public global::OpenAI.ListBatchesResponseObject? Type711 { get; set; } + public global::OpenAI.ListAuditLogsResponseObject? Type711 { get; set; } /// /// /// - public global::OpenAI.ListFilesResponse? Type712 { get; set; } + public global::System.Collections.Generic.IList? Type712 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type713 { get; set; } + public global::OpenAI.ListBatchesResponse? Type713 { get; set; } /// /// /// - public global::OpenAI.OpenAIFile? Type714 { get; set; } + public global::System.Collections.Generic.IList? Type714 { get; set; } /// /// /// - public global::OpenAI.OpenAIFileObject? Type715 { get; set; } + public global::OpenAI.ListBatchesResponseObject? Type715 { get; set; } /// /// /// - public global::OpenAI.OpenAIFilePurpose? Type716 { get; set; } + public global::OpenAI.ListFilesResponse? Type716 { get; set; } /// /// /// - public global::OpenAI.OpenAIFileStatus? 
Type717 { get; set; } + public global::System.Collections.Generic.IList? Type717 { get; set; } /// /// /// - public global::OpenAI.ListFilesResponseObject? Type718 { get; set; } + public global::OpenAI.OpenAIFile? Type718 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobCheckpointsResponse? Type719 { get; set; } + public global::OpenAI.OpenAIFileObject? Type719 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type720 { get; set; } + public global::OpenAI.OpenAIFilePurpose? Type720 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobCheckpointsResponseObject? Type721 { get; set; } + public global::OpenAI.OpenAIFileStatus? Type721 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobEventsResponse? Type722 { get; set; } + public global::OpenAI.ListFineTuningJobCheckpointsResponse? Type722 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type723 { get; set; } + public global::System.Collections.Generic.IList? Type723 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobEventsResponseObject? Type724 { get; set; } + public global::OpenAI.ListFineTuningJobCheckpointsResponseObject? Type724 { get; set; } /// /// /// - public global::OpenAI.ListMessagesResponse? Type725 { get; set; } + public global::OpenAI.ListFineTuningJobEventsResponse? Type725 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type726 { get; set; } + public global::System.Collections.Generic.IList? Type726 { get; set; } /// /// /// - public global::OpenAI.ListModelsResponse? Type727 { get; set; } + public global::OpenAI.ListFineTuningJobEventsResponseObject? Type727 { get; set; } /// /// /// - public global::OpenAI.ListModelsResponseObject? Type728 { get; set; } + public global::OpenAI.ListMessagesResponse? Type728 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type729 { get; set; } + public global::System.Collections.Generic.IList? 
Type729 { get; set; } /// /// /// - public global::OpenAI.Model15? Type730 { get; set; } + public global::OpenAI.ListModelsResponse? Type730 { get; set; } /// /// /// - public global::OpenAI.ModelObject? Type731 { get; set; } + public global::OpenAI.ListModelsResponseObject? Type731 { get; set; } /// /// /// - public global::OpenAI.ListPaginatedFineTuningJobsResponse? Type732 { get; set; } + public global::System.Collections.Generic.IList? Type732 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type733 { get; set; } + public global::OpenAI.Model15? Type733 { get; set; } /// /// /// - public global::OpenAI.ListPaginatedFineTuningJobsResponseObject? Type734 { get; set; } + public global::OpenAI.ModelObject? Type734 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsResponse? Type735 { get; set; } + public global::OpenAI.ListPaginatedFineTuningJobsResponse? Type735 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type736 { get; set; } + public global::System.Collections.Generic.IList? Type736 { get; set; } /// /// /// - public global::OpenAI.ListRunsResponse? Type737 { get; set; } + public global::OpenAI.ListPaginatedFineTuningJobsResponseObject? Type737 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type738 { get; set; } + public global::OpenAI.ListRunStepsResponse? Type738 { get; set; } /// /// /// - public global::OpenAI.ListThreadsResponse? Type739 { get; set; } + public global::System.Collections.Generic.IList? Type739 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type740 { get; set; } + public global::OpenAI.ListRunsResponse? Type740 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesResponse? Type741 { get; set; } + public global::System.Collections.Generic.IList? Type741 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type742 { get; set; } + public global::OpenAI.ListThreadsResponse? 
Type742 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObject? Type743 { get; set; } + public global::System.Collections.Generic.IList? Type743 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectObject? Type744 { get; set; } + public global::OpenAI.ListVectorStoreFilesResponse? Type744 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectStatus? Type745 { get; set; } + public global::System.Collections.Generic.IList? Type745 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectLastError? Type746 { get; set; } + public global::OpenAI.VectorStoreFileObject? Type746 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectLastErrorCode? Type747 { get; set; } + public global::OpenAI.VectorStoreFileObjectObject? Type747 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectChunkingStrategy? Type748 { get; set; } + public global::OpenAI.VectorStoreFileObjectStatus? Type748 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyResponseParam? Type749 { get; set; } + public global::OpenAI.VectorStoreFileObjectLastError? Type749 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyResponseParamType? Type750 { get; set; } + public global::OpenAI.VectorStoreFileObjectLastErrorCode? Type750 { get; set; } /// /// /// - public global::OpenAI.OtherChunkingStrategyResponseParam? Type751 { get; set; } + public global::OpenAI.VectorStoreFileObjectChunkingStrategy? Type751 { get; set; } /// /// /// - public global::OpenAI.OtherChunkingStrategyResponseParamType? Type752 { get; set; } + public global::OpenAI.StaticChunkingStrategyResponseParam? Type752 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminator? Type753 { get; set; } + public global::OpenAI.StaticChunkingStrategyResponseParamType? 
Type753 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminatorType? Type754 { get; set; } + public global::OpenAI.OtherChunkingStrategyResponseParam? Type754 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoresResponse? Type755 { get; set; } + public global::OpenAI.OtherChunkingStrategyResponseParamType? Type755 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type756 { get; set; } + public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminator? Type756 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObject? Type757 { get; set; } + public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminatorType? Type757 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectObject? Type758 { get; set; } + public global::OpenAI.ListVectorStoresResponse? Type758 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectFileCounts? Type759 { get; set; } + public global::System.Collections.Generic.IList? Type759 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectStatus? Type760 { get; set; } + public global::OpenAI.VectorStoreObject? Type760 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequest? Type761 { get; set; } + public global::OpenAI.VectorStoreObjectObject? Type761 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type762 { get; set; } + public global::OpenAI.VectorStoreObjectFileCounts? Type762 { get; set; } /// /// /// - public global::OpenAI.ToolsItem7? Type763 { get; set; } + public global::OpenAI.VectorStoreObjectStatus? Type763 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolDiscriminator? Type764 { get; set; } + public global::OpenAI.ModifyAssistantRequest? Type764 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolDiscriminatorType? Type765 { get; set; } + public global::System.Collections.Generic.IList? 
Type765 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResources? Type766 { get; set; } + public global::OpenAI.ToolsItem7? Type766 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResourcesCodeInterpreter? Type767 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolDiscriminator? Type767 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResourcesFileSearch? Type768 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolDiscriminatorType? Type768 { get; set; } /// /// /// - public global::OpenAI.ModifyMessageRequest? Type769 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResources? Type769 { get; set; } /// /// /// - public global::OpenAI.ModifyRunRequest? Type770 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResourcesCodeInterpreter? Type770 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequest? Type771 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResourcesFileSearch? Type771 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResources? Type772 { get; set; } + public global::OpenAI.ModifyMessageRequest? Type772 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter? Type773 { get; set; } + public global::OpenAI.ModifyRunRequest? Type773 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResourcesFileSearch? Type774 { get; set; } + public global::OpenAI.ModifyThreadRequest? Type774 { get; set; } /// /// /// - public global::OpenAI.Project? Type775 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResources? Type775 { get; set; } /// /// /// - public global::OpenAI.ProjectObject? Type776 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter? Type776 { get; set; } /// /// /// - public global::OpenAI.ProjectStatus? 
Type777 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResourcesFileSearch? Type777 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKey? Type778 { get; set; } + public global::OpenAI.Project? Type778 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyObject? Type779 { get; set; } + public global::OpenAI.ProjectObject? Type779 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyOwner? Type780 { get; set; } + public global::OpenAI.ProjectStatus? Type780 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyOwnerType? Type781 { get; set; } + public global::OpenAI.ProjectApiKey? Type781 { get; set; } /// /// /// - public global::OpenAI.ProjectUser? Type782 { get; set; } + public global::OpenAI.ProjectApiKeyObject? Type782 { get; set; } /// /// /// - public global::OpenAI.ProjectUserObject? Type783 { get; set; } + public global::OpenAI.ProjectApiKeyOwner? Type783 { get; set; } /// /// /// - public global::OpenAI.ProjectUserRole? Type784 { get; set; } + public global::OpenAI.ProjectApiKeyOwnerType? Type784 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccount? Type785 { get; set; } + public global::OpenAI.ProjectUser? Type785 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountObject? Type786 { get; set; } + public global::OpenAI.ProjectUserObject? Type786 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountRole? Type787 { get; set; } + public global::OpenAI.ProjectUserRole? Type787 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyDeleteResponse? Type788 { get; set; } + public global::OpenAI.ProjectServiceAccount? Type788 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyDeleteResponseObject? Type789 { get; set; } + public global::OpenAI.ProjectServiceAccountObject? Type789 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyListResponse? Type790 { get; set; } + public global::OpenAI.ProjectServiceAccountRole? 
Type790 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyListResponseObject? Type791 { get; set; } + public global::OpenAI.ProjectApiKeyDeleteResponse? Type791 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type792 { get; set; } + public global::OpenAI.ProjectApiKeyDeleteResponseObject? Type792 { get; set; } /// /// /// - public global::OpenAI.ProjectCreateRequest? Type793 { get; set; } + public global::OpenAI.ProjectApiKeyListResponse? Type793 { get; set; } /// /// /// - public global::OpenAI.ProjectListResponse? Type794 { get; set; } + public global::OpenAI.ProjectApiKeyListResponseObject? Type794 { get; set; } /// /// /// - public global::OpenAI.ProjectListResponseObject? Type795 { get; set; } + public global::System.Collections.Generic.IList? Type795 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type796 { get; set; } + public global::OpenAI.ProjectCreateRequest? Type796 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountApiKey? Type797 { get; set; } + public global::OpenAI.ProjectListResponse? Type797 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountApiKeyObject? Type798 { get; set; } + public global::OpenAI.ProjectListResponseObject? Type798 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateRequest? Type799 { get; set; } + public global::System.Collections.Generic.IList? Type799 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponse? Type800 { get; set; } + public global::OpenAI.ProjectServiceAccountApiKey? Type800 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponseObject? Type801 { get; set; } + public global::OpenAI.ProjectServiceAccountApiKeyObject? Type801 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponseRole? Type802 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateRequest? 
Type802 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountDeleteResponse? Type803 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponse? Type803 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountDeleteResponseObject? Type804 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponseObject? Type804 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountListResponse? Type805 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponseRole? Type805 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountListResponseObject? Type806 { get; set; } + public global::OpenAI.ProjectServiceAccountDeleteResponse? Type806 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type807 { get; set; } + public global::OpenAI.ProjectServiceAccountDeleteResponseObject? Type807 { get; set; } /// /// /// - public global::OpenAI.ProjectUpdateRequest? Type808 { get; set; } + public global::OpenAI.ProjectServiceAccountListResponse? Type808 { get; set; } /// /// /// - public global::OpenAI.ProjectUserCreateRequest? Type809 { get; set; } + public global::OpenAI.ProjectServiceAccountListResponseObject? Type809 { get; set; } /// /// /// - public global::OpenAI.ProjectUserCreateRequestRole? Type810 { get; set; } + public global::System.Collections.Generic.IList? Type810 { get; set; } /// /// /// - public global::OpenAI.ProjectUserDeleteResponse? Type811 { get; set; } + public global::OpenAI.ProjectUpdateRequest? Type811 { get; set; } /// /// /// - public global::OpenAI.ProjectUserDeleteResponseObject? Type812 { get; set; } + public global::OpenAI.ProjectUserCreateRequest? Type812 { get; set; } /// /// /// - public global::OpenAI.ProjectUserListResponse? Type813 { get; set; } + public global::OpenAI.ProjectUserCreateRequestRole? Type813 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type814 { get; set; } + public global::OpenAI.ProjectUserDeleteResponse? Type814 { get; set; } /// /// /// - public global::OpenAI.ProjectUserUpdateRequest? Type815 { get; set; } + public global::OpenAI.ProjectUserDeleteResponseObject? Type815 { get; set; } /// /// /// - public global::OpenAI.ProjectUserUpdateRequestRole? Type816 { get; set; } + public global::OpenAI.ProjectUserListResponse? Type816 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemCreate? Type817 { get; set; } + public global::System.Collections.Generic.IList? Type817 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemCreateItem? Type818 { get; set; } + public global::OpenAI.ProjectUserUpdateRequest? Type818 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type819 { get; set; } + public global::OpenAI.ProjectUserUpdateRequestRole? Type819 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem? Type820 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemCreate? Type820 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemDelete? Type821 { get; set; } + public global::OpenAI.RealtimeConversationItem? Type821 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemTruncate? Type822 { get; set; } + public global::OpenAI.RealtimeConversationItemObject? Type822 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferAppend? Type823 { get; set; } + public global::OpenAI.RealtimeConversationItemType? Type823 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferClear? Type824 { get; set; } + public global::OpenAI.RealtimeConversationItemStatus? Type824 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferCommit? Type825 { get; set; } + public global::OpenAI.RealtimeConversationItemRole? 
Type825 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCancel? Type826 { get; set; } + public global::System.Collections.Generic.IList? Type826 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCreate? Type827 { get; set; } + public global::OpenAI.RealtimeConversationItemContentItem? Type827 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCreateResponse? Type828 { get; set; } + public global::OpenAI.RealtimeConversationItemContentItemType? Type828 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type829 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemDelete? Type829 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCreateResponseTool? Type830 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemTruncate? Type830 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type831 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferAppend? Type831 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens? Type832 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferClear? Type832 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdate? Type833 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferCommit? Type833 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdateSession? Type834 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCancel? Type834 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? Type835 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreate? Type835 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? Type836 { get; set; } + public global::OpenAI.RealtimeResponse? 
Type836 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type837 { get; set; } + public global::OpenAI.RealtimeResponseObject? Type837 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdateSessionTool? Type838 { get; set; } + public global::OpenAI.RealtimeResponseStatus? Type838 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type839 { get; set; } + public global::System.Collections.Generic.IList? Type839 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens? Type840 { get; set; } + public global::OpenAI.RealtimeResponseUsage? Type840 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationCreated? Type841 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdate? Type841 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationCreatedConversation? Type842 { get; set; } + public global::OpenAI.RealtimeSession? Type842 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemCreated? Type843 { get; set; } + public global::OpenAI.RealtimeSessionObject? Type843 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemCreatedItem? Type844 { get; set; } + public global::OpenAI.RealtimeSessionVoice? Type844 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type845 { get; set; } + public global::OpenAI.RealtimeAudioFormat? Type845 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem? Type846 { get; set; } + public global::OpenAI.RealtimeSessionInputAudioTranscription? Type846 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemDeleted? Type847 { get; set; } + public global::OpenAI.RealtimeSessionTurnDetection? 
Type847 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? Type848 { get; set; } + public global::OpenAI.RealtimeSessionTurnDetectionType? Type848 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? Type849 { get; set; } + public global::System.Collections.Generic.IList? Type849 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? Type850 { get; set; } + public global::OpenAI.RealtimeSessionTool? Type850 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemTruncated? Type851 { get; set; } + public global::OpenAI.RealtimeSessionToolChoice? Type851 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventError? Type852 { get; set; } + public global::OpenAI.OneOf? Type852 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventErrorError? Type853 { get; set; } + public global::OpenAI.RealtimeSessionMaxOutputTokens? Type853 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferCleared? Type854 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreated? Type854 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? Type855 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreatedConversation? Type855 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? Type856 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemCreated? Type856 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? Type857 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemDeleted? Type857 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventRateLimitsUpdated? 
Type858 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? Type858 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type859 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? Type859 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? Type860 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? Type860 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioDelta? Type861 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemTruncated? Type861 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioDone? Type862 { get; set; } + public global::OpenAI.RealtimeServerEventError? Type862 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? Type863 { get; set; } + public global::OpenAI.RealtimeServerEventErrorError? Type863 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? Type864 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCleared? Type864 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartAdded? Type865 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? Type865 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? Type866 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? Type866 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartDone? Type867 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? Type867 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartDonePart? 
Type868 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdated? Type868 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseCreated? Type869 { get; set; } + public global::System.Collections.Generic.IList? Type869 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseCreatedResponse? Type870 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? Type870 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type871 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDelta? Type871 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseDone? Type872 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDone? Type872 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseDoneResponse? Type873 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? Type873 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? Type874 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? Type874 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? Type875 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAdded? Type875 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemAdded? Type876 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? Type876 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem? Type877 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDone? Type877 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type878 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDonePart? 
Type878 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem? Type879 { get; set; } + public global::OpenAI.RealtimeServerEventResponseCreated? Type879 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemDone? Type880 { get; set; } + public global::OpenAI.RealtimeServerEventResponseDone? Type880 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem? Type881 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? Type881 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type882 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? Type882 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem? Type883 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemAdded? Type883 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseTextDelta? Type884 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemDone? Type884 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseTextDone? Type885 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDelta? Type885 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreated? Type886 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDone? Type886 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreatedSession? Type887 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreated? Type887 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? Type888 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdated? 
Type888 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? Type889 { get; set; } + public global::OpenAI.SubmitToolOutputsRunRequest? Type889 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type890 { get; set; } + public global::System.Collections.Generic.IList? Type890 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreatedSessionTool? Type891 { get; set; } + public global::OpenAI.SubmitToolOutputsRunRequestToolOutput? Type891 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type892 { get; set; } + public global::OpenAI.UpdateVectorStoreRequest? Type892 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens? Type893 { get; set; } + public global::OpenAI.Upload? Type893 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdated? Type894 { get; set; } + public global::OpenAI.UploadStatus? Type894 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdatedSession? Type895 { get; set; } + public global::OpenAI.UploadObject? Type895 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? Type896 { get; set; } + public global::OpenAI.UploadPart? Type896 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? Type897 { get; set; } + public global::OpenAI.UploadPartObject? Type897 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type898 { get; set; } + public global::OpenAI.User? Type898 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool? Type899 { get; set; } + public global::OpenAI.UserObject? Type899 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type900 { get; set; } + public global::OpenAI.UserRole? 
Type900 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? Type901 { get; set; } + public global::OpenAI.UserDeleteResponse? Type901 { get; set; } /// /// /// - public global::OpenAI.SubmitToolOutputsRunRequest? Type902 { get; set; } + public global::OpenAI.UserDeleteResponseObject? Type902 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type903 { get; set; } + public global::OpenAI.UserListResponse? Type903 { get; set; } /// /// /// - public global::OpenAI.SubmitToolOutputsRunRequestToolOutput? Type904 { get; set; } + public global::OpenAI.UserListResponseObject? Type904 { get; set; } /// /// /// - public global::OpenAI.UpdateVectorStoreRequest? Type905 { get; set; } + public global::System.Collections.Generic.IList? Type905 { get; set; } /// /// /// - public global::OpenAI.Upload? Type906 { get; set; } + public global::OpenAI.UserRoleUpdateRequest? Type906 { get; set; } /// /// /// - public global::OpenAI.UploadStatus? Type907 { get; set; } + public global::OpenAI.UserRoleUpdateRequestRole? Type907 { get; set; } /// /// /// - public global::OpenAI.UploadObject? Type908 { get; set; } + public global::OpenAI.VectorStoreFileBatchObject? Type908 { get; set; } /// /// /// - public global::OpenAI.UploadPart? Type909 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectObject? Type909 { get; set; } /// /// /// - public global::OpenAI.UploadPartObject? Type910 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectStatus? Type910 { get; set; } /// /// /// - public global::OpenAI.User? Type911 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectFileCounts? Type911 { get; set; } /// /// /// - public global::OpenAI.UserObject? Type912 { get; set; } + public global::OpenAI.RealtimeServerEventType? Type912 { get; set; } /// /// /// - public global::OpenAI.UserRole? Type913 { get; set; } + public global::OpenAI.RealtimeServerEventBase? 
Type913 { get; set; } /// /// /// - public global::OpenAI.UserDeleteResponse? Type914 { get; set; } + public global::OpenAI.RealtimeConversation? Type914 { get; set; } /// /// /// - public global::OpenAI.UserDeleteResponseObject? Type915 { get; set; } + public global::OpenAI.RealtimeConversationObject? Type915 { get; set; } /// /// /// - public global::OpenAI.UserListResponse? Type916 { get; set; } + public global::OpenAI.RealtimeContentPart? Type916 { get; set; } /// /// /// - public global::OpenAI.UserListResponseObject? Type917 { get; set; } + public global::OpenAI.RealtimeContentPartType? Type917 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type918 { get; set; } + public global::OpenAI.RealtimeErrorDetails? Type918 { get; set; } /// /// /// - public global::OpenAI.UserRoleUpdateRequest? Type919 { get; set; } + public global::OpenAI.RealtimeSessionUpdate? Type919 { get; set; } /// /// /// - public global::OpenAI.UserRoleUpdateRequestRole? Type920 { get; set; } + public global::OpenAI.RealtimeSessionUpdateType? Type920 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObject? Type921 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferAppend? Type921 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectObject? Type922 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferAppendType? Type922 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectStatus? Type923 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommit? Type923 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectFileCounts? Type924 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommitType? Type924 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventType? Type925 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClear? Type925 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventBase? 
Type926 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClearType? Type926 { get; set; } /// /// /// - public global::OpenAI.RealtimeAudioFormat? Type927 { get; set; } + public global::OpenAI.RealtimeConversationItemCreate? Type927 { get; set; } /// /// /// - public global::OpenAI.RealtimeSession? Type928 { get; set; } + public global::OpenAI.RealtimeConversationItemCreateType? Type928 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionObject? Type929 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncate? Type929 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionVoice? Type930 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncateType? Type930 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionInputAudioTranscription? Type931 { get; set; } + public global::OpenAI.RealtimeConversationItemDelete? Type931 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTurnDetection? Type932 { get; set; } + public global::OpenAI.RealtimeConversationItemDeleteType? Type932 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTurnDetectionType? Type933 { get; set; } + public global::OpenAI.RealtimeResponseCreate? Type933 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type934 { get; set; } + public global::OpenAI.RealtimeResponseCreateType? Type934 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTool? Type935 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponse? Type935 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionToolChoice? Type936 { get; set; } + public global::System.Collections.Generic.IList? Type936 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type937 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseModalitie? Type937 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionMaxOutputTokens? 
Type938 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseVoice? Type938 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversation? Type939 { get; set; } + public global::System.Collections.Generic.IList? Type939 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationObject? Type940 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseTool? Type940 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItem? Type941 { get; set; } + public global::OpenAI.OneOf? Type941 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemObject? Type942 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseToolChoice? Type942 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemType? Type943 { get; set; } + public global::OpenAI.OneOf? Type943 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemStatus? Type944 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseMaxOutputTokens? Type944 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemRole? Type945 { get; set; } + public global::OpenAI.RealtimeResponseCancel? Type945 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type946 { get; set; } + public global::OpenAI.RealtimeResponseCancelType? Type946 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemContentItem? Type947 { get; set; } + public global::OpenAI.RealtimeError? Type947 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemContentItemType? Type948 { get; set; } + public global::OpenAI.RealtimeErrorType? Type948 { get; set; } /// /// /// - public global::OpenAI.RealtimeContentPart? Type949 { get; set; } + public global::OpenAI.RealtimeSessionCreated? Type949 { get; set; } /// /// /// - public global::OpenAI.RealtimeContentPartType? Type950 { get; set; } + public global::OpenAI.RealtimeSessionCreatedType? 
Type950 { get; set; } /// /// /// - public global::OpenAI.RealtimeErrorDetails? Type951 { get; set; } + public global::OpenAI.RealtimeSessionUpdated? Type951 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponse? Type952 { get; set; } + public global::OpenAI.RealtimeSessionUpdatedType? Type952 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseObject? Type953 { get; set; } + public global::OpenAI.RealtimeConversationCreated? Type953 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseStatus? Type954 { get; set; } + public global::OpenAI.RealtimeConversationCreatedType? Type954 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type955 { get; set; } + public global::OpenAI.RealtimeConversationItemCreated? Type955 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseUsage? Type956 { get; set; } + public global::OpenAI.RealtimeConversationItemCreatedType? Type956 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdate? Type957 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompleted? Type957 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdateType? Type958 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompletedType? Type958 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferAppend? Type959 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailed? Type959 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferAppendType? Type960 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailedType? Type960 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommit? Type961 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncated? Type961 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommitType? 
Type962 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncatedType? Type962 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClear? Type963 { get; set; } + public global::OpenAI.RealtimeConversationItemDeleted? Type963 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClearType? Type964 { get; set; } + public global::OpenAI.RealtimeConversationItemDeletedType? Type964 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreate? Type965 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommitted? Type965 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreateType? Type966 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommittedType? Type966 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncate? Type967 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCleared? Type967 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncateType? Type968 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClearedType? Type968 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDelete? Type969 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStarted? Type969 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeleteType? Type970 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStartedType? Type970 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreate? Type971 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStopped? Type971 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateType? Type972 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStoppedType? Type972 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponse? Type973 { get; set; } + public global::OpenAI.RealtimeResponseCreated? 
Type973 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type974 { get; set; } + public global::OpenAI.RealtimeResponseCreatedType? Type974 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseModalitie? Type975 { get; set; } + public global::OpenAI.RealtimeResponseDone? Type975 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseVoice? Type976 { get; set; } + public global::OpenAI.RealtimeResponseDoneType? Type976 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type977 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemAdded? Type977 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseTool? Type978 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemAddedType? Type978 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type979 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemDone? Type979 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseToolChoice? Type980 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemDoneType? Type980 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type981 { get; set; } + public global::OpenAI.RealtimeResponseContentPartAdded? Type981 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseMaxOutputTokens? Type982 { get; set; } + public global::OpenAI.RealtimeResponseContentPartAddedType? Type982 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCancel? Type983 { get; set; } + public global::OpenAI.RealtimeResponseContentPartDone? Type983 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCancelType? Type984 { get; set; } + public global::OpenAI.RealtimeResponseContentPartDoneType? Type984 { get; set; } /// /// /// - public global::OpenAI.RealtimeError? Type985 { get; set; } + public global::OpenAI.RealtimeResponseTextDelta? 
Type985 { get; set; } /// /// /// - public global::OpenAI.RealtimeErrorType? Type986 { get; set; } + public global::OpenAI.RealtimeResponseTextDeltaType? Type986 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionCreated? Type987 { get; set; } + public global::OpenAI.RealtimeResponseTextDone? Type987 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionCreatedType? Type988 { get; set; } + public global::OpenAI.RealtimeResponseTextDoneType? Type988 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdated? Type989 { get; set; } + public global::OpenAI.RealtimeResponseAudioTranscriptDelta? Type989 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdatedType? Type990 { get; set; } + public global::OpenAI.RealtimeResponseAudioTranscriptDeltaType? Type990 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationCreated? Type991 { get; set; } + public global::OpenAI.RealtimeResponseAudioTranscriptDone? Type991 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationCreatedType? Type992 { get; set; } + public global::OpenAI.RealtimeResponseAudioTranscriptDoneType? Type992 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreated? Type993 { get; set; } + public global::OpenAI.RealtimeResponseAudioDelta? Type993 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreatedType? Type994 { get; set; } + public global::OpenAI.RealtimeResponseAudioDeltaType? Type994 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompleted? Type995 { get; set; } + public global::OpenAI.RealtimeResponseAudioDone? Type995 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompletedType? Type996 { get; set; } + public global::OpenAI.RealtimeResponseAudioDoneType? Type996 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailed? 
Type997 { get; set; } + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDelta? Type997 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailedType? Type998 { get; set; } + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDeltaType? Type998 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncated? Type999 { get; set; } + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDone? Type999 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncatedType? Type1000 { get; set; } + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDoneType? Type1000 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeleted? Type1001 { get; set; } + public global::OpenAI.RealtimeRateLimitsUpdated? Type1001 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeletedType? Type1002 { get; set; } + public global::OpenAI.RealtimeRateLimitsUpdatedType? Type1002 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommitted? Type1003 { get; set; } + public global::System.Collections.Generic.IList? Type1003 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommittedType? Type1004 { get; set; } + public global::OpenAI.RealtimeRateLimitsUpdatedRateLimit? Type1004 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCleared? Type1005 { get; set; } + public global::OpenAI.RealtimeRateLimitsUpdatedRateLimitName? Type1005 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClearedType? Type1006 { get; set; } + public global::OpenAI.RealtimeServerEvent? Type1006 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStarted? Type1007 { get; set; } + public global::OpenAI.RealtimeServerEventDiscriminator? Type1007 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStartedType? 
Type1008 { get; set; } + public global::OpenAI.RealtimeServerEventDiscriminatorType? Type1008 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStopped? Type1009 { get; set; } + public global::OpenAI.CreateBatchRequest? Type1009 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStoppedType? Type1010 { get; set; } + public global::OpenAI.CreateBatchRequestEndpoint? Type1010 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreated? Type1011 { get; set; } + public global::OpenAI.CreateBatchRequestCompletionWindow? Type1011 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreatedType? Type1012 { get; set; } + public global::OpenAI.ListAssistantsOrder? Type1012 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseDone? Type1013 { get; set; } + public global::OpenAI.ListFilesOrder? Type1013 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseDoneType? Type1014 { get; set; } + public global::OpenAI.ListAuditLogsEffectiveAt? Type1014 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemAdded? Type1015 { get; set; } + public global::System.Collections.Generic.IList? Type1015 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemAddedType? Type1016 { get; set; } + public global::OpenAI.ListMessagesOrder? Type1016 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemDone? Type1017 { get; set; } + public global::OpenAI.ListRunsOrder? Type1017 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemDoneType? Type1018 { get; set; } + public global::System.Collections.Generic.IList? Type1018 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartAdded? Type1019 { get; set; } + public global::OpenAI.CreateRunIncludeItem? Type1019 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartAddedType? 
Type1020 { get; set; } + public global::OpenAI.ListRunStepsOrder? Type1020 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartDone? Type1021 { get; set; } + public global::System.Collections.Generic.IList? Type1021 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartDoneType? Type1022 { get; set; } + public global::OpenAI.ListRunStepsIncludeItem? Type1022 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDelta? Type1023 { get; set; } + public global::System.Collections.Generic.IList? Type1023 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDeltaType? Type1024 { get; set; } + public global::OpenAI.GetRunStepIncludeItem? Type1024 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDone? Type1025 { get; set; } + public global::OpenAI.ListVectorStoresOrder? Type1025 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDoneType? Type1026 { get; set; } + public global::OpenAI.ListFilesInVectorStoreBatchOrder? Type1026 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDelta? Type1027 { get; set; } + public global::OpenAI.ListFilesInVectorStoreBatchFilter? Type1027 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDeltaType? Type1028 { get; set; } + public global::OpenAI.ListVectorStoreFilesOrder? Type1028 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDone? Type1029 { get; set; } + public global::OpenAI.ListVectorStoreFilesFilter? Type1029 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDoneType? Type1030 { get; set; } + public global::OpenAI.OneOf? Type1030 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDelta? Type1031 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseAudioDeltaType? Type1032 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseAudioDone? 
Type1033 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseAudioDoneType? Type1034 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDelta? Type1035 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDeltaType? Type1036 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDone? Type1037 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDoneType? Type1038 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeRateLimitsUpdated? Type1039 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeRateLimitsUpdatedType? Type1040 { get; set; } - /// - /// - /// - public global::System.Collections.Generic.IList? Type1041 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeRateLimitsUpdatedRateLimit? Type1042 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeRateLimitsUpdatedRateLimitName? Type1043 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeServerEvent? Type1044 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeServerEventDiscriminator? Type1045 { get; set; } - /// - /// - /// - public global::OpenAI.RealtimeServerEventDiscriminatorType? Type1046 { get; set; } - /// - /// - /// - public global::OpenAI.CreateBatchRequest? Type1047 { get; set; } - /// - /// - /// - public global::OpenAI.CreateBatchRequestEndpoint? Type1048 { get; set; } - /// - /// - /// - public global::OpenAI.CreateBatchRequestCompletionWindow? Type1049 { get; set; } - /// - /// - /// - public global::OpenAI.ListAssistantsOrder? Type1050 { get; set; } - /// - /// - /// - public global::OpenAI.ListAuditLogsEffectiveAt? Type1051 { get; set; } - /// - /// - /// - public global::System.Collections.Generic.IList? Type1052 { get; set; } - /// - /// - /// - public global::OpenAI.ListMessagesOrder? 
Type1053 { get; set; } - /// - /// - /// - public global::OpenAI.ListRunsOrder? Type1054 { get; set; } - /// - /// - /// - public global::System.Collections.Generic.IList? Type1055 { get; set; } - /// - /// - /// - public global::OpenAI.CreateRunIncludeItem? Type1056 { get; set; } - /// - /// - /// - public global::OpenAI.ListRunStepsOrder? Type1057 { get; set; } - /// - /// - /// - public global::System.Collections.Generic.IList? Type1058 { get; set; } - /// - /// - /// - public global::OpenAI.ListRunStepsIncludeItem? Type1059 { get; set; } - /// - /// - /// - public global::System.Collections.Generic.IList? Type1060 { get; set; } - /// - /// - /// - public global::OpenAI.GetRunStepIncludeItem? Type1061 { get; set; } - /// - /// - /// - public global::OpenAI.ListVectorStoresOrder? Type1062 { get; set; } - /// - /// - /// - public global::OpenAI.ListFilesInVectorStoreBatchOrder? Type1063 { get; set; } - /// - /// - /// - public global::OpenAI.ListFilesInVectorStoreBatchFilter? Type1064 { get; set; } - /// - /// - /// - public global::OpenAI.ListVectorStoreFilesOrder? Type1065 { get; set; } - /// - /// - /// - public global::OpenAI.ListVectorStoreFilesFilter? Type1066 { get; set; } - /// - /// - /// - public global::OpenAI.OneOf? Type1067 { get; set; } - /// - /// - /// - public global::OpenAI.OneOf? Type1068 { get; set; } + public global::OpenAI.OneOf? Type1031 { get; set; } } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs index fb773e2aa..0d420972e 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs @@ -119,7 +119,7 @@ partial void ProcessCreateAssistantResponseContent( /// Create an assistant with a model and instructions. /// /// - /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -152,7 +152,7 @@ partial void ProcessCreateAssistantResponseContent( /// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs index 0972fae12..ac037daa7 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs @@ -185,10 +185,10 @@ partial void ProcessCreateRunResponseContent( /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs index 27ced542e..ad5d4bab4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs @@ -169,10 +169,10 @@ partial void ProcessCreateThreadAndRunResponseContent( /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs index db1be482b..31c2e9f52 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs @@ -126,7 +126,7 @@ partial void ProcessModifyAssistantResponseContent( /// /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -158,7 +158,7 @@ partial void ProcessModifyAssistantResponseContent( /// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateSpeech.g.cs b/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateSpeech.g.cs index 8486d2c51..7f8308632 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateSpeech.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateSpeech.g.cs @@ -106,13 +106,13 @@ partial void ProcessCreateSpeechResponseContent( /// Generates audio from the input text. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
diff --git a/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranscription.g.cs index 6023426fb..474872f6a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranscription.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranscription.g.cs @@ -164,7 +164,7 @@ partial void ProcessCreateTranscriptionResponseContent( /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
diff --git a/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranslation.g.cs b/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranslation.g.cs index 45f7466d1..d6dd167a1 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranslation.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AudioClient.CreateTranslation.g.cs @@ -149,7 +149,7 @@ partial void ProcessCreateTranslationResponseContent( /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
diff --git a/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs index 4c9207333..fc85ddda8 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs @@ -128,7 +128,7 @@ partial void ProcessCreateChatCompletionResponseContent( /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// /// @@ -142,7 +142,7 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -173,17 +173,23 @@ partial void ProcessCreateChatCompletionResponseContent( /// use:
/// `["text", "audio"]` /// + /// + /// Configuration for a [Predicted Output](/docs/guides/latency-optimization#use-predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// /// /// Parameters for audio output. Required when audio output is requested with
/// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @@ -195,7 +201,7 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
@@ -236,10 +242,10 @@ partial void ProcessCreateChatCompletionResponseContent( /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with @@ -256,6 +262,7 @@ partial void ProcessCreateChatCompletionResponseContent( int? maxCompletionTokens = default, int? n = default, global::System.Collections.Generic.IList? modalities = default, + global::OpenAI.PredictionContent? prediction = default, global::OpenAI.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = default, global::OpenAI.ResponseFormat? responseFormat = default, @@ -285,6 +292,7 @@ partial void ProcessCreateChatCompletionResponseContent( MaxCompletionTokens = maxCompletionTokens, N = n, Modalities = modalities, + Prediction = prediction, Audio = audio, PresencePenalty = presencePenalty, ResponseFormat = responseFormat, diff --git a/src/libs/OpenAI/Generated/OpenAI.CompletionsClient.CreateCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.CompletionsClient.CreateCompletion.g.cs index 445cdcd91..3b65e3841 100644 --- a/src/libs/OpenAI/Generated/OpenAI.CompletionsClient.CreateCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.CompletionsClient.CreateCompletion.g.cs @@ -118,7 +118,7 @@ partial void ProcessCreateCompletionResponseContent( /// Creates a completion for the provided prompt and parameters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
@@ -137,7 +137,7 @@ partial void ProcessCreateCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -163,7 +163,7 @@ partial void ProcessCreateCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -198,7 +198,7 @@ partial void ProcessCreateCompletionResponseContent( /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.EmbeddingsClient.CreateEmbedding.g.cs b/src/libs/OpenAI/Generated/OpenAI.EmbeddingsClient.CreateEmbedding.g.cs index eefa3801e..92b232bd3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.EmbeddingsClient.CreateEmbedding.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.EmbeddingsClient.CreateEmbedding.g.cs @@ -122,7 +122,7 @@ partial void ProcessCreateEmbeddingResponseContent( /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -134,7 +134,7 @@ partial void ProcessCreateEmbeddingResponseContent( /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.FilesClient.ListFiles.g.cs b/src/libs/OpenAI/Generated/OpenAI.FilesClient.ListFiles.g.cs index fc576232b..9145f892a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.FilesClient.ListFiles.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.FilesClient.ListFiles.g.cs @@ -7,11 +7,17 @@ public partial class FilesClient { partial void PrepareListFilesArguments( global::System.Net.Http.HttpClient httpClient, - ref string? purpose); + ref string? purpose, + ref int? limit, + ref global::OpenAI.ListFilesOrder? order, + ref string? after); partial void PrepareListFilesRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, - string? purpose); + string? purpose, + int? limit, + global::OpenAI.ListFilesOrder? order, + string? after); partial void ProcessListFilesResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -22,26 +28,42 @@ partial void ProcessListFilesResponseContent( ref string content); /// - /// Returns a list of files that belong to the user's organization. + /// Returns a list of files. /// /// + /// + /// Default Value: 10000 + /// + /// + /// Default Value: desc + /// + /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ListFilesAsync( string? purpose = default, + int? limit = default, + global::OpenAI.ListFilesOrder? order = default, + string? 
after = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( client: HttpClient); PrepareListFilesArguments( httpClient: HttpClient, - purpose: ref purpose); + purpose: ref purpose, + limit: ref limit, + order: ref order, + after: ref after); var __pathBuilder = new PathBuilder( path: "/files", baseUri: HttpClient.BaseAddress); __pathBuilder .AddOptionalParameter("purpose", purpose) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("order", order?.ToValueString()) + .AddOptionalParameter("after", after) ; var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -70,7 +92,10 @@ partial void ProcessListFilesResponseContent( PrepareListFilesRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, - purpose: purpose); + purpose: purpose, + limit: limit, + order: order, + after: after); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs b/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs index 792ed7bfb..f32c4a4fb 100644 --- a/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs @@ -123,7 +123,7 @@ partial void ProcessCreateFineTuningJobResponseContent( /// /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs index 662d52ae4..0ee399f2c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs @@ -19,7 +19,7 @@ public partial interface IAssistantsClient /// Create an assistant with a model and instructions. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -52,7 +52,7 @@ public partial interface IAssistantsClient /// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs index 588c39505..990e8ab58 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs @@ -77,10 +77,10 @@ public partial interface IAssistantsClient /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs index 907e342c9..d2fa125c3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs @@ -69,10 +69,10 @@ public partial interface IAssistantsClient /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs index 77603ac48..4f06914c0 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs @@ -22,7 +22,7 @@ public partial interface IAssistantsClient /// /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -54,7 +54,7 @@ public partial interface IAssistantsClient /// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateSpeech.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateSpeech.g.cs index 06949e65b..f3bde154c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateSpeech.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateSpeech.g.cs @@ -18,13 +18,13 @@ public partial interface IAudioClient /// Generates audio from the input text. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
diff --git a/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranscription.g.cs index c920bbe7a..ccc907c3e 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranscription.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranscription.g.cs @@ -31,7 +31,7 @@ public partial interface IAudioClient /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
diff --git a/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranslation.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranslation.g.cs index e3c106ad2..d91273818 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranslation.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAudioClient.CreateTranslation.g.cs @@ -28,7 +28,7 @@ public partial interface IAudioClient /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
diff --git a/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs index 90ee8ffbf..2d40c0839 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs @@ -28,7 +28,7 @@ public partial interface IChatClient /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// /// @@ -42,7 +42,7 @@ public partial interface IChatClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -73,17 +73,23 @@ public partial interface IChatClient /// use:
/// `["text", "audio"]` /// + /// + /// Configuration for a [Predicted Output](/docs/guides/latency-optimization#use-predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// /// /// Parameters for audio output. Required when audio output is requested with
/// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @@ -95,7 +101,7 @@ public partial interface IChatClient /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
@@ -136,10 +142,10 @@ public partial interface IChatClient /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with @@ -156,6 +162,7 @@ public partial interface IChatClient int? maxCompletionTokens = default, int? n = default, global::System.Collections.Generic.IList? modalities = default, + global::OpenAI.PredictionContent? prediction = default, global::OpenAI.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = default, global::OpenAI.ResponseFormat? responseFormat = default, diff --git a/src/libs/OpenAI/Generated/OpenAI.ICompletionsClient.CreateCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.ICompletionsClient.CreateCompletion.g.cs index fd46f6c8f..cc00caa41 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ICompletionsClient.CreateCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ICompletionsClient.CreateCompletion.g.cs @@ -18,7 +18,7 @@ public partial interface ICompletionsClient /// Creates a completion for the provided prompt and parameters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
@@ -37,7 +37,7 @@ public partial interface ICompletionsClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -63,7 +63,7 @@ public partial interface ICompletionsClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -98,7 +98,7 @@ public partial interface ICompletionsClient /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.IEmbeddingsClient.CreateEmbedding.g.cs b/src/libs/OpenAI/Generated/OpenAI.IEmbeddingsClient.CreateEmbedding.g.cs index b13adeeb0..117b562ca 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IEmbeddingsClient.CreateEmbedding.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IEmbeddingsClient.CreateEmbedding.g.cs @@ -22,7 +22,7 @@ public partial interface IEmbeddingsClient /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -34,7 +34,7 @@ public partial interface IEmbeddingsClient /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.IFilesClient.ListFiles.g.cs b/src/libs/OpenAI/Generated/OpenAI.IFilesClient.ListFiles.g.cs index 13679bc6e..ae98ebe89 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IFilesClient.ListFiles.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IFilesClient.ListFiles.g.cs @@ -5,13 +5,23 @@ namespace OpenAI public partial interface IFilesClient { /// - /// Returns a list of files that belong to the user's organization. + /// Returns a list of files. /// /// + /// + /// Default Value: 10000 + /// + /// + /// Default Value: desc + /// + /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ListFilesAsync( string? purpose = default, + int? limit = default, + global::OpenAI.ListFilesOrder? order = default, + string? after = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs b/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs index 7d0042078..c6d5de60b 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs @@ -23,7 +23,7 @@ public partial interface IFineTuningClient /// /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImage.g.cs b/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImage.g.cs index 5be9fc86e..6c8f4c55f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImage.g.cs @@ -52,7 +52,7 @@ public partial interface IImagesClient /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageEdit.g.cs b/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageEdit.g.cs index 295ba7460..393fb85ea 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageEdit.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageEdit.g.cs @@ -54,7 +54,7 @@ public partial interface IImagesClient /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageVariation.g.cs b/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageVariation.g.cs index fc357f818..2e333d5da 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageVariation.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IImagesClient.CreateImageVariation.g.cs @@ -44,7 +44,7 @@ public partial interface IImagesClient /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.IModerationsClient.CreateModeration.g.cs b/src/libs/OpenAI/Generated/OpenAI.IModerationsClient.CreateModeration.g.cs index 396a1bcdf..914d4bd71 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IModerationsClient.CreateModeration.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IModerationsClient.CreateModeration.g.cs @@ -26,7 +26,7 @@ public partial interface IModerationsClient /// /// The content moderation model you would like to use. Learn more in
/// [the moderation guide](/docs/guides/moderation), and learn about
- /// available models [here](/docs/models/moderation).
+ /// available models [here](/docs/models#moderation).
/// Default Value: omni-moderation-latest
/// Example: omni-moderation-2024-09-26 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IUploadsClient.CreateUpload.g.cs b/src/libs/OpenAI/Generated/OpenAI.IUploadsClient.CreateUpload.g.cs index 33f28f35c..bec341836 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IUploadsClient.CreateUpload.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IUploadsClient.CreateUpload.g.cs @@ -8,7 +8,7 @@ public partial interface IUploadsClient /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// @@ -22,7 +22,7 @@ public partial interface IUploadsClient /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// diff --git a/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImage.g.cs b/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImage.g.cs index 114864d5f..571ee386b 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImage.g.cs @@ -152,7 +152,7 @@ partial void ProcessCreateImageResponseContent( /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageEdit.g.cs b/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageEdit.g.cs index 008e8dc90..951ef400d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageEdit.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageEdit.g.cs @@ -194,7 +194,7 @@ partial void ProcessCreateImageEditResponseContent( /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageVariation.g.cs b/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageVariation.g.cs index f64a59af8..29bcc88a9 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageVariation.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ImagesClient.CreateImageVariation.g.cs @@ -174,7 +174,7 @@ partial void ProcessCreateImageVariationResponseContent( /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantObject.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantObject.g.cs index 72c9fca62..8bed5ca08 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantObject.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantObject.g.cs @@ -45,7 +45,7 @@ public sealed partial class AssistantObject public required string? Description { get; set; } /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonRequired] @@ -96,7 +96,7 @@ public sealed partial class AssistantObject public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantStreamEvent.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantStreamEvent.g.cs index 93a25a2e5..44e232455 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantStreamEvent.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantStreamEvent.g.cs @@ -30,7 +30,7 @@ namespace OpenAI public global::OpenAI.AssistantStreamEventDiscriminatorEvent? Event { get; } /// - /// Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + /// Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. /// #if NET6_0_OR_GREATER public global::OpenAI.ErrorEvent? Error { get; init; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantToolsFileSearchFileSearch.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantToolsFileSearchFileSearch.g.cs index 29ef6de09..b88b8e6dc 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantToolsFileSearchFileSearch.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantToolsFileSearchFileSearch.g.cs @@ -10,14 +10,14 @@ public sealed partial class AssistantToolsFileSearchFileSearch { /// /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.
- /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. ///
[global::System.Text.Json.Serialization.JsonPropertyName("max_num_results")] public int? MaxNumResults { get; set; } /// /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
- /// See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. ///
[global::System.Text.Json.Serialization.JsonPropertyName("ranking_options")] public global::OpenAI.FileSearchRankingOptions? RankingOptions { get; set; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantsApiResponseFormatOption.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantsApiResponseFormatOption.g.cs index e2b62af2f..ef43242ec 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.AssistantsApiResponseFormatOption.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.AssistantsApiResponseFormatOption.g.cs @@ -6,7 +6,7 @@ namespace OpenAI { /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.cs index f319c0c58..5f4a089e3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.cs @@ -16,7 +16,7 @@ public sealed partial class ChatCompletionRequestMessageContentPartImageImageUrl public required string Url { get; set; } /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto ///
[global::System.Text.Json.Serialization.JsonPropertyName("detail")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.cs index c696e5212..3127cd2fa 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto ///
public enum ChatCompletionRequestMessageContentPartImageImageUrlDetail diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs index e69909a44..9c3b2c7bd 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs @@ -8,6 +8,13 @@ namespace OpenAI ///
public sealed partial class CompletionUsageCompletionTokensDetails { + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that appeared in the completion. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("accepted_prediction_tokens")] + public int? AcceptedPredictionTokens { get; set; } + /// /// Audio input tokens generated by the model. /// @@ -20,6 +27,16 @@ public sealed partial class CompletionUsageCompletionTokensDetails [global::System.Text.Json.Serialization.JsonPropertyName("reasoning_tokens")] public int? ReasoningTokens { get; set; } + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that did not appear in the completion. However, like
+ /// reasoning tokens, these tokens are still counted in the total
+ /// completion tokens for purposes of billing, output, and context window
+ /// limits. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("rejected_prediction_tokens")] + public int? RejectedPredictionTokens { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs index 59c492ec9..d322e6bde 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public sealed partial class CreateAssistantRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o ///
[global::System.Text.Json.Serialization.JsonPropertyName("model")] @@ -73,7 +73,7 @@ public sealed partial class CreateAssistantRequest public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs index d652fb95e..a2fc09b5d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs @@ -21,7 +21,7 @@ public sealed partial class CreateChatCompletionRequest public required global::System.Collections.Generic.IList Messages { get; set; } /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o ///
[global::System.Text.Json.Serialization.JsonPropertyName("model")] @@ -46,7 +46,7 @@ public sealed partial class CreateChatCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("frequency_penalty")] @@ -106,6 +106,15 @@ public sealed partial class CreateChatCompletionRequest [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] public global::System.Collections.Generic.IList? Modalities { get; set; } + /// + /// Configuration for a [Predicted Output](/docs/guides/latency-optimization#use-predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("prediction")] + public global::OpenAI.PredictionContent? Prediction { get; set; } + /// /// Parameters for audio output. Required when audio output is requested with
/// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). @@ -115,14 +124,14 @@ public sealed partial class CreateChatCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")] public double? PresencePenalty { get; set; } /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @@ -141,7 +150,7 @@ public sealed partial class CreateChatCompletionRequest /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
@@ -209,13 +218,13 @@ public sealed partial class CreateChatCompletionRequest public global::OpenAI.ChatCompletionToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
[global::System.Text.Json.Serialization.JsonPropertyName("user")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs index 38dd97952..094e29b1c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs @@ -10,8 +10,8 @@ namespace OpenAI public sealed partial class CreateChatCompletionRequestAudio { /// - /// Specifies the voice type. Supported voices are `alloy`, `echo`,
- /// `fable`, `onyx`, `nova`, and `shimmer`. + /// The voice the model uses to respond. Supported voices are `alloy`,
+ /// `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. ///
[global::System.Text.Json.Serialization.JsonPropertyName("voice")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter))] @@ -20,7 +20,7 @@ public sealed partial class CreateChatCompletionRequestAudio /// /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
- /// `opus`, or `pcm16`. + /// `opus`, or `pcm16`. ///
[global::System.Text.Json.Serialization.JsonPropertyName("format")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioFormatJsonConverter))] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs index a3ed1663b..6f11df42f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs @@ -5,7 +5,7 @@ namespace OpenAI { /// /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
- /// `opus`, or `pcm16`. + /// `opus`, or `pcm16`. ///
public enum CreateChatCompletionRequestAudioFormat { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs index d9849f2b2..4703354cb 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs @@ -4,8 +4,8 @@ namespace OpenAI { /// - /// Specifies the voice type. Supported voices are `alloy`, `echo`,
- /// `fable`, `onyx`, `nova`, and `shimmer`. + /// The voice the model uses to respond. Supported voices are `alloy`,
+ /// `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. ///
public enum CreateChatCompletionRequestAudioVoice { @@ -16,23 +16,31 @@ public enum CreateChatCompletionRequestAudioVoice /// /// /// - Echo, + Ash, + /// + /// + /// + Ballad, /// /// /// - Fable, + Coral, /// /// /// - Onyx, + Echo, /// /// /// - Nova, + Sage, /// /// /// Shimmer, + /// + /// + /// + Verse, } /// @@ -48,11 +56,13 @@ public static string ToValueString(this CreateChatCompletionRequestAudioVoice va return value switch { CreateChatCompletionRequestAudioVoice.Alloy => "alloy", + CreateChatCompletionRequestAudioVoice.Ash => "ash", + CreateChatCompletionRequestAudioVoice.Ballad => "ballad", + CreateChatCompletionRequestAudioVoice.Coral => "coral", CreateChatCompletionRequestAudioVoice.Echo => "echo", - CreateChatCompletionRequestAudioVoice.Fable => "fable", - CreateChatCompletionRequestAudioVoice.Onyx => "onyx", - CreateChatCompletionRequestAudioVoice.Nova => "nova", + CreateChatCompletionRequestAudioVoice.Sage => "sage", CreateChatCompletionRequestAudioVoice.Shimmer => "shimmer", + CreateChatCompletionRequestAudioVoice.Verse => "verse", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } @@ -64,11 +74,13 @@ public static string ToValueString(this CreateChatCompletionRequestAudioVoice va return value switch { "alloy" => CreateChatCompletionRequestAudioVoice.Alloy, + "ash" => CreateChatCompletionRequestAudioVoice.Ash, + "ballad" => CreateChatCompletionRequestAudioVoice.Ballad, + "coral" => CreateChatCompletionRequestAudioVoice.Coral, "echo" => CreateChatCompletionRequestAudioVoice.Echo, - "fable" => CreateChatCompletionRequestAudioVoice.Fable, - "onyx" => CreateChatCompletionRequestAudioVoice.Onyx, - "nova" => CreateChatCompletionRequestAudioVoice.Nova, + "sage" => CreateChatCompletionRequestAudioVoice.Sage, "shimmer" => CreateChatCompletionRequestAudioVoice.Shimmer, + "verse" => CreateChatCompletionRequestAudioVoice.Verse, _ => null, }; } diff --git 
a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs index e514fa1de..30c167409 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs @@ -5,7 +5,7 @@ namespace OpenAI { /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateCompletionRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateCompletionRequest.g.cs index 686c76734..8864b3a35 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateCompletionRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateCompletionRequest.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public sealed partial class CreateCompletionRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.AnyOfJsonConverterFactory2))] @@ -46,7 +46,7 @@ public sealed partial class CreateCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("frequency_penalty")] @@ -87,7 +87,7 @@ public sealed partial class CreateCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")] @@ -147,7 +147,7 @@ public sealed partial class CreateCompletionRequest public double? TopP { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
[global::System.Text.Json.Serialization.JsonPropertyName("user")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateEmbeddingRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateEmbeddingRequest.g.cs index 5dba8402b..e821ca71e 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateEmbeddingRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateEmbeddingRequest.g.cs @@ -20,7 +20,7 @@ public sealed partial class CreateEmbeddingRequest public required global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>> Input { get; set; } /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small ///
[global::System.Text.Json.Serialization.JsonPropertyName("model")] @@ -44,7 +44,7 @@ public sealed partial class CreateEmbeddingRequest public int? Dimensions { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
[global::System.Text.Json.Serialization.JsonPropertyName("user")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs index a05dd9131..7b524dadb 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs @@ -12,7 +12,7 @@ public sealed partial class CreateFineTuningJobRequest { /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini ///
[global::System.Text.Json.Serialization.JsonPropertyName("model")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageEditRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageEditRequest.g.cs index 9b5c0553d..764549fd6 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageEditRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageEditRequest.g.cs @@ -80,7 +80,7 @@ public sealed partial class CreateImageEditRequest public global::OpenAI.CreateImageEditRequestResponseFormat? ResponseFormat { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
[global::System.Text.Json.Serialization.JsonPropertyName("user")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageRequest.g.cs index 96e09efbb..3f09a2e12 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageRequest.g.cs @@ -72,7 +72,7 @@ public sealed partial class CreateImageRequest public global::OpenAI.CreateImageRequestStyle? Style { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
[global::System.Text.Json.Serialization.JsonPropertyName("user")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageVariationRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageVariationRequest.g.cs index 1fb4311e5..22f050e42 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageVariationRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateImageVariationRequest.g.cs @@ -60,7 +60,7 @@ public sealed partial class CreateImageVariationRequest public global::OpenAI.CreateImageVariationRequestSize? Size { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
[global::System.Text.Json.Serialization.JsonPropertyName("user")] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateModerationRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateModerationRequest.g.cs index 4c53e6799..290fc9a43 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateModerationRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateModerationRequest.g.cs @@ -22,7 +22,7 @@ public sealed partial class CreateModerationRequest /// /// The content moderation model you would like to use. Learn more in
/// [the moderation guide](/docs/guides/moderation), and learn about
- /// available models [here](/docs/models/moderation).
+ /// available models [here](/docs/models#moderation).
/// Default Value: omni-moderation-latest
/// Example: omni-moderation-2024-09-26 ///
diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs index 61726bb17..33223612d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs @@ -108,13 +108,13 @@ public sealed partial class CreateRunRequest public global::OpenAI.AssistantsApiToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequest.g.cs index 999fe6eb5..25fae730e 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequest.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public sealed partial class CreateSpeechRequest { /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.AnyOfJsonConverterFactory2))] @@ -26,7 +26,7 @@ public sealed partial class CreateSpeechRequest public required string Input { get; set; } /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). 
/// [global::System.Text.Json.Serialization.JsonPropertyName("voice")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateSpeechRequestVoiceJsonConverter))] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequestVoice.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequestVoice.g.cs index a113e19c3..9937085c3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequestVoice.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateSpeechRequestVoice.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// public enum CreateSpeechRequestVoice { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs index 427f49a7c..4075b7ca1 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs @@ -108,13 +108,13 @@ public sealed partial class CreateThreadAndRunRequest public global::OpenAI.AssistantsApiToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? 
ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionRequest.g.cs index 12327a2ff..dadfa652d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionRequest.g.cs @@ -40,7 +40,7 @@ public sealed partial class CreateTranscriptionRequest public string? Language { get; set; } /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// [global::System.Text.Json.Serialization.JsonPropertyName("prompt")] public string? Prompt { get; set; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationRequest.g.cs index 78fc96554..e3c843a5f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationRequest.g.cs @@ -34,7 +34,7 @@ public sealed partial class CreateTranslationRequest public required global::OpenAI.AnyOf Model { get; set; } /// - /// An optional text to guide the model's style or continue a previous audio segment. 
The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// [global::System.Text.Json.Serialization.JsonPropertyName("prompt")] public string? Prompt { get; set; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ErrorEvent.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ErrorEvent.g.cs index bfcb27302..c4317b5b7 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ErrorEvent.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ErrorEvent.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + /// Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. /// public sealed partial class ErrorEvent { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FileSearchRankingOptions.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FileSearchRankingOptions.g.cs index 708540e1a..e29243139 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FileSearchRankingOptions.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FileSearchRankingOptions.g.cs @@ -5,7 +5,7 @@ namespace OpenAI { /// /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
- /// See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. ///
public sealed partial class FileSearchRankingOptions { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs index 4cc079301..0690c3c24 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs @@ -23,7 +23,7 @@ public sealed partial class FinetuneChatRequestInput public global::System.Collections.Generic.IList? Tools { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesOrder.g.cs similarity index 56% rename from src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.ListFilesOrder.g.cs index 53cdb193e..9dfb695ae 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesOrder.g.cs @@ -4,40 +4,46 @@ namespace OpenAI { /// - /// + /// Default Value: desc /// - public enum RealtimeClientEventSessionUpdateSessionMaxOutputTokens + public enum ListFilesOrder { /// /// /// - Inf, + Asc, + /// + /// + /// + Desc, } /// /// Enum extensions to do fast conversions without the reflection. /// - public static class RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions + public static class ListFilesOrderExtensions { /// /// Converts an enum to a string. 
/// - public static string ToValueString(this RealtimeClientEventSessionUpdateSessionMaxOutputTokens value) + public static string ToValueString(this ListFilesOrder value) { return value switch { - RealtimeClientEventSessionUpdateSessionMaxOutputTokens.Inf => "inf", + ListFilesOrder.Asc => "asc", + ListFilesOrder.Desc => "desc", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } /// /// Converts an string to a enum. /// - public static RealtimeClientEventSessionUpdateSessionMaxOutputTokens? ToEnum(string value) + public static ListFilesOrder? ToEnum(string value) { return value switch { - "inf" => RealtimeClientEventSessionUpdateSessionMaxOutputTokens.Inf, + "asc" => ListFilesOrder.Asc, + "desc" => ListFilesOrder.Desc, _ => null, }; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponse.g.cs index 16eb01b05..f708b71ba 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponse.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponse.g.cs @@ -8,6 +8,13 @@ namespace OpenAI ///
public sealed partial class ListFilesResponse { + /// + /// Example: list + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Object { get; set; } + /// /// /// @@ -16,11 +23,25 @@ public sealed partial class ListFilesResponse public required global::System.Collections.Generic.IList Data { get; set; } /// - /// + /// Example: file-abc123 /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectJsonConverter))] - public global::OpenAI.ListFilesResponseObject Object { get; set; } + [global::System.Text.Json.Serialization.JsonPropertyName("first_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string FirstId { get; set; } + + /// + /// Example: file-abc456 + /// + [global::System.Text.Json.Serialization.JsonPropertyName("last_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string LastId { get; set; } + + /// + /// Example: false + /// + [global::System.Text.Json.Serialization.JsonPropertyName("has_more")] + [global::System.Text.Json.Serialization.JsonRequired] + public required bool HasMore { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs index 95da4e44c..3f6e0f132 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs @@ -9,7 +9,7 @@ namespace OpenAI public sealed partial class ModifyAssistantRequest { /// - /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] public string? Model { get; set; } @@ -68,7 +68,7 @@ public sealed partial class ModifyAssistantRequest public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.PredictionContent.g.cs similarity index 70% rename from src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTool.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.PredictionContent.g.cs index f89240788..a5fab4eb2 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTool.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.PredictionContent.g.cs @@ -1,36 +1,33 @@ +#pragma warning disable CS0618 // Type or member is obsolete + #nullable enable namespace OpenAI { /// - /// + /// Static predicted output content, such as the content of a text file that is
+ /// being regenerated. ///
- public sealed partial class RealtimeClientEventSessionUpdateSessionTool + public sealed partial class PredictionContent { /// - /// The type of the tool, e.g., "function". + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. ///
[global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The name of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? Name { get; set; } - - /// - /// The description of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("description")] - public string? Description { get; set; } + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.PredictionContentTypeJsonConverter))] + public global::OpenAI.PredictionContentType Type { get; set; } /// - /// Parameters of the function in JSON Schema. + /// The content that should be matched when generating a model response.
+ /// If generated tokens would match this content, the entire model response
+ /// can be returned much more quickly. ///
- [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] - public object? Parameters { get; set; } + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.OneOf> Content { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -69,14 +66,14 @@ public string ToJson( /// /// Deserializes a JSON string using the provided JsonSerializerContext. /// - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTool? FromJson( + public static global::OpenAI.PredictionContent? FromJson( string json, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return global::System.Text.Json.JsonSerializer.Deserialize( json, - typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionTool), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionTool; + typeof(global::OpenAI.PredictionContent), + jsonSerializerContext) as global::OpenAI.PredictionContent; } /// @@ -86,11 +83,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTool? FromJson( + public static global::OpenAI.PredictionContent? 
FromJson( string json, global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.Deserialize( + return global::System.Text.Json.JsonSerializer.Deserialize( json, jsonSerializerOptions); } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponseObject.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.PredictionContentType.g.cs similarity index 59% rename from src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponseObject.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.PredictionContentType.g.cs index 7d9e86a39..e50c95f11 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ListFilesResponseObject.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.PredictionContentType.g.cs @@ -4,40 +4,41 @@ namespace OpenAI { /// - /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. ///
- public enum ListFilesResponseObject + public enum PredictionContentType { /// /// /// - List, + Content, } /// /// Enum extensions to do fast conversions without the reflection. /// - public static class ListFilesResponseObjectExtensions + public static class PredictionContentTypeExtensions { /// /// Converts an enum to a string. /// - public static string ToValueString(this ListFilesResponseObject value) + public static string ToValueString(this PredictionContentType value) { return value switch { - ListFilesResponseObject.List => "list", + PredictionContentType.Content => "content", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } /// /// Converts an string to a enum. /// - public static ListFilesResponseObject? ToEnum(string value) + public static PredictionContentType? ToEnum(string value) { return value switch { - "list" => ListFilesResponseObject.List, + "content" => PredictionContentType.Content, _ => null, }; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs index 23231f704..6ba9fe4fe 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs @@ -4,7 +4,8 @@ namespace OpenAI { /// - /// Send this event when adding an item to the conversation. + /// Add a new Item to the Conversation's context, including messages, function calls, and function call responses. This event can be used both to populate a "history" of the conversation and to add new items mid-stream, but has the current limitation that it cannot populate assistant audio messages.
+ /// If successful, the server will respond with a `conversation.item.created` event, otherwise an `error` event will be sent. ///
public sealed partial class RealtimeClientEventConversationItemCreate { @@ -15,24 +16,28 @@ public sealed partial class RealtimeClientEventConversationItemCreate public string? EventId { get; set; } /// - /// The event type, must be "conversation.item.create". + /// The event type, must be `conversation.item.create`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// The ID of the preceding item after which the new item will be inserted. + /// The ID of the preceding item after which the new item will be inserted. If not set, the new item will be appended to the end of the conversation. If set, it allows an item to be inserted mid-conversation. If the ID cannot be found, an error will be returned and the item will not be added. /// [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] public string? PreviousItemId { get; set; } /// - /// The item to add to the conversation. + /// A realtime Item is of three types: message, function_call, or function_call_output.
+ /// A message item can contain text or audio.
+ /// A function_call item indicates a model's desire to call a function, which is the only tool supported for now
+ /// A function_call_output item indicates a function response.
+ /// The client may add and remove message and function_call_output Items using conversation.item.create and conversation.item.delete. ///
[global::System.Text.Json.Serialization.JsonPropertyName("item")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeClientEventConversationItemCreateItem Item { get; set; } + public required global::OpenAI.RealtimeConversationItem Item { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItem.g.cs deleted file mode 100644 index 3cee52ce3..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItem.g.cs +++ /dev/null @@ -1,129 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// The item to add to the conversation. - /// - public sealed partial class RealtimeClientEventConversationItemCreateItem - { - /// - /// The unique ID of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The type of the item ("message", "function_call", "function_call_output"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The status of the item ("completed", "in_progress", "incomplete"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status")] - public string? Status { get; set; } - - /// - /// The role of the message sender ("user", "assistant", "system"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("role")] - public string? Role { get; set; } - - /// - /// The content of the message. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("content")] - public global::System.Collections.Generic.IList? Content { get; set; } - - /// - /// The ID of the function call (for "function_call" items). 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] - public string? CallId { get; set; } - - /// - /// The name of the function being called (for "function_call" items). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? Name { get; set; } - - /// - /// The arguments of the function call (for "function_call" items). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] - public string? Arguments { get; set; } - - /// - /// The output of the function call (for "function_call_output" items). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output")] - public string? Output { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventConversationItemCreateItem? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventConversationItemCreateItem), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemCreateItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventConversationItemCreateItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItemContentItem.g.cs deleted file mode 100644 index b9065b393..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItemContentItem.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeClientEventConversationItemCreateItemContentItem - { - /// - /// The content type ("input_text", "input_audio", "text", "audio"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The text content. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("text")] - public string? Text { get; set; } - - /// - /// Base64-encoded audio bytes. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("audio")] - public string? Audio { get; set; } - - /// - /// The transcript of the audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] - public string? Transcript { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs index ee6a7b527..d36934166 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Send this event when you want to remove any item from the conversation history. + /// Send this event when you want to remove any item from the conversation history. The server will respond with a `conversation.item.deleted` event, unless the item does not exist in the conversation history, in which case the server will respond with an error. 
/// public sealed partial class RealtimeClientEventConversationItemDelete { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs index 75abddbe4..35c0f6bad 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs @@ -4,7 +4,9 @@ namespace OpenAI { /// - /// Send this event when you want to truncate a previous assistant message’s audio. + /// Send this event to truncate a previous assistant message’s audio. The server will produce audio faster than realtime, so this event is useful when the user interrupts to truncate audio that has already been sent to the client but not yet played. This will synchronize the server's understanding of the audio with the client's playback.
+ /// Truncating audio will delete the server-side text transcript to ensure there is no text in the context that hasn't been heard by the user.
+ /// If successful, the server will respond with a `conversation.item.truncated` event. ///
public sealed partial class RealtimeClientEventConversationItemTruncate { @@ -22,21 +24,21 @@ public sealed partial class RealtimeClientEventConversationItemTruncate public required string Type { get; set; } /// - /// The ID of the assistant message item to truncate. + /// The ID of the assistant message item to truncate. Only assistant message items can be truncated. /// [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] [global::System.Text.Json.Serialization.JsonRequired] public required string ItemId { get; set; } /// - /// The index of the content part to truncate. + /// The index of the content part to truncate. Set this to 0. /// [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] [global::System.Text.Json.Serialization.JsonRequired] public required int ContentIndex { get; set; } /// - /// Inclusive duration up to which audio is truncated, in milliseconds. + /// Inclusive duration up to which audio is truncated, in milliseconds. If the audio_end_ms is greater than the actual audio duration, the server will respond with an error. /// [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs index f19e8ca28..d50ac09a4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs @@ -4,7 +4,8 @@ namespace OpenAI { /// - /// Send this event to append audio bytes to the input audio buffer. + /// Send this event to append audio bytes to the input audio buffer. The audio buffer is temporary storage you can write to and later commit. In Server VAD mode, the audio buffer is used to detect speech and the server will decide when to commit. 
When Server VAD is disabled, you must commit the audio buffer manually.
+ /// The client may choose how much audio to place in each event up to a maximum of 15 MiB, for example streaming smaller chunks from the client may allow the VAD to be more responsive. Unlike most other client events, the server will not send a confirmation response to this event. ///
public sealed partial class RealtimeClientEventInputAudioBufferAppend { @@ -22,7 +23,7 @@ public sealed partial class RealtimeClientEventInputAudioBufferAppend public required string Type { get; set; } /// - /// Base64-encoded audio bytes. + /// Base64-encoded audio bytes. This must be in the format specified by the `input_audio_format` field in the session configuration. /// [global::System.Text.Json.Serialization.JsonPropertyName("audio")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs index 004e29464..5e281e43f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Send this event to clear the audio bytes in the buffer. + /// Send this event to clear the audio bytes in the buffer. The server will respond with an `input_audio_buffer.cleared` event. /// public sealed partial class RealtimeClientEventInputAudioBufferClear { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs index b2303e554..a40cfe0e3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs @@ -4,7 +4,8 @@ namespace OpenAI { /// - /// Send this event to commit audio bytes to a user message. + /// Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. 
When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically.
+ /// Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event. ///
public sealed partial class RealtimeClientEventInputAudioBufferCommit { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs index 771a4840c..42ae7231d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Send this event to cancel an in-progress response. + /// Send this event to cancel an in-progress response. The server will respond with a `response.cancelled` event or an error if there is no response to cancel. /// public sealed partial class RealtimeClientEventResponseCancel { @@ -15,7 +15,7 @@ public sealed partial class RealtimeClientEventResponseCancel public string? EventId { get; set; } /// - /// The event type, must be "response.cancel". + /// The event type, must be `response.cancel`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs index af446d424..0947f560a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs @@ -4,7 +4,10 @@ namespace OpenAI { /// - /// Send this event to trigger a response generation. + /// This event instructs the server to create a Response, which means triggering model inference. When in Server VAD mode, the server will create Responses automatically.
+ /// A Response will include at least one Item, and may have two, in which case the second will be a function call. These Items will be appended to the conversation history.
+ /// The server will respond with a `response.created` event, events for Items and content created, and finally a `response.done` event to indicate the Response is complete.
+ /// The `response.create` event includes inference configuration like `instructions`, and `temperature`. These fields will override the Session's configuration for this Response only. ///
public sealed partial class RealtimeClientEventResponseCreate { @@ -15,18 +18,18 @@ public sealed partial class RealtimeClientEventResponseCreate public string? EventId { get; set; } /// - /// The event type, must be "response.create". + /// The event type, must be `response.create`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// Configuration for the response. + /// The response resource. /// [global::System.Text.Json.Serialization.JsonPropertyName("response")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeClientEventResponseCreateResponse Response { get; set; } + public required global::OpenAI.RealtimeResponse Response { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponse.g.cs deleted file mode 100644 index 7b09cff76..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponse.g.cs +++ /dev/null @@ -1,126 +0,0 @@ - -#pragma warning disable CS0618 // Type or member is obsolete - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for the response. - /// - public sealed partial class RealtimeClientEventResponseCreateResponse - { - /// - /// The modalities for the response. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] - public global::System.Collections.Generic.IList? Modalities { get; set; } - - /// - /// Instructions for the model. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] - public string? Instructions { get; set; } - - /// - /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("voice")] - public string? Voice { get; set; } - - /// - /// The format of output audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] - public string? OutputAudioFormat { get; set; } - - /// - /// Tools (functions) available to the model. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } - - /// - /// How the model chooses tools. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] - public string? ToolChoice { get; set; } - - /// - /// Sampling temperature. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] - public double? Temperature { get; set; } - - /// - /// Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] - public global::OpenAI.OneOf? MaxOutputTokens { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventResponseCreateResponse? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventResponseCreateResponse), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreateResponse; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventResponseCreateResponse? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs deleted file mode 100644 index 5afb18f40..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs +++ /dev/null @@ -1,45 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public enum RealtimeClientEventResponseCreateResponseMaxOutputTokens - { - /// - /// - /// - Inf, - } - - /// - /// Enum extensions to do fast conversions without the reflection. - /// - public static class RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions - { - /// - /// Converts an enum to a string. - /// - public static string ToValueString(this RealtimeClientEventResponseCreateResponseMaxOutputTokens value) - { - return value switch - { - RealtimeClientEventResponseCreateResponseMaxOutputTokens.Inf => "inf", - _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), - }; - } - /// - /// Converts an string to a enum. - /// - public static RealtimeClientEventResponseCreateResponseMaxOutputTokens? 
ToEnum(string value) - { - return value switch - { - "inf" => RealtimeClientEventResponseCreateResponseMaxOutputTokens.Inf, - _ => null, - }; - } - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseTool.g.cs deleted file mode 100644 index 305ec4380..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseTool.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeClientEventResponseCreateResponseTool - { - /// - /// The type of the tool. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The name of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? Name { get; set; } - - /// - /// The description of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("description")] - public string? Description { get; set; } - - /// - /// Parameters of the function in JSON Schema. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] - public object? Parameters { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventResponseCreateResponseTool? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventResponseCreateResponseTool), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreateResponseTool; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventResponseCreateResponseTool? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseToolParameters.g.cs deleted file mode 100644 index 220e13472..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseToolParameters.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Parameters of the function in JSON Schema. - /// - public sealed partial class RealtimeClientEventResponseCreateResponseToolParameters - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs index 188f85131..d52c0e7e7 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Send this event to update the session’s default configuration. + /// Send this event to update the session’s default configuration. The client may send this event at any time to update the session configuration, and any field may be updated at any time, except for "voice". The server will respond with a `session.updated` event that shows the full effective configuration. Only fields that are present are updated, thus the correct way to clear a field like "instructions" is to pass an empty string. /// public sealed partial class RealtimeClientEventSessionUpdate { @@ -22,11 +22,16 @@ public sealed partial class RealtimeClientEventSessionUpdate public required string Type { get; set; } /// - /// Session configuration to update. + /// A session refers to a single WebSocket connection between a client and the server.
+ /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
+ /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
+ /// and function calls (if functions are provided by the client).
+ /// A realtime Session represents the overall client-server interaction, and contains default configuration.
+ /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). ///
[global::System.Text.Json.Serialization.JsonPropertyName("session")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeClientEventSessionUpdateSession Session { get; set; } + public required global::OpenAI.RealtimeSession Session { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSession.g.cs deleted file mode 100644 index 6a376c15b..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSession.g.cs +++ /dev/null @@ -1,144 +0,0 @@ - -#pragma warning disable CS0618 // Type or member is obsolete - -#nullable enable - -namespace OpenAI -{ - /// - /// Session configuration to update. - /// - public sealed partial class RealtimeClientEventSessionUpdateSession - { - /// - /// The set of modalities the model can respond with. To disable audio, set this to ["text"]. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] - public global::System.Collections.Generic.IList? Modalities { get; set; } - - /// - /// The default system instructions prepended to model calls. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] - public string? Instructions { get; set; } - - /// - /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. Cannot be changed once the model has responded with audio at least once. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("voice")] - public string? Voice { get; set; } - - /// - /// The format of input audio. Options are "pcm16", "g711_ulaw", or "g711_alaw". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] - public string? InputAudioFormat { get; set; } - - /// - /// The format of output audio. 
Options are "pcm16", "g711_ulaw", or "g711_alaw". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] - public string? OutputAudioFormat { get; set; } - - /// - /// Configuration for input audio transcription. Can be set to `null` to turn off. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] - public global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? InputAudioTranscription { get; set; } - - /// - /// Configuration for turn detection. Can be set to `null` to turn off. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] - public global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? TurnDetection { get; set; } - - /// - /// Tools (functions) available to the model. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } - - /// - /// How the model chooses tools. Options are "auto", "none", "required", or specify a function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] - public string? ToolChoice { get; set; } - - /// - /// Sampling temperature for the model. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] - public double? Temperature { get; set; } - - /// - /// Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] - public global::OpenAI.OneOf? 
MaxOutputTokens { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventSessionUpdateSession? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventSessionUpdateSession), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSession; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventSessionUpdateSession? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionInputAudioTranscription.g.cs deleted file mode 100644 index 92dcfcde1..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionInputAudioTranscription.g.cs +++ /dev/null @@ -1,81 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for input audio transcription. Can be set to `null` to turn off. 
- /// - public sealed partial class RealtimeClientEventSessionUpdateSessionInputAudioTranscription - { - /// - /// The model to use for transcription (e.g., "whisper-1"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("model")] - public string? Model { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. 
- /// - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionToolParameters.g.cs deleted file mode 100644 index ab4dcaf24..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionToolParameters.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Parameters of the function in JSON Schema. - /// - public sealed partial class RealtimeClientEventSessionUpdateSessionToolParameters - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTurnDetection.g.cs deleted file mode 100644 index a47fc6f0a..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTurnDetection.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for turn detection. Can be set to `null` to turn off. - /// - public sealed partial class RealtimeClientEventSessionUpdateSessionTurnDetection - { - /// - /// Type of turn detection, only "server_vad" is currently supported. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// Activation threshold for VAD (0.0 to 1.0). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] - public double? Threshold { get; set; } - - /// - /// Amount of audio to include before speech starts (in milliseconds). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] - public int? PrefixPaddingMs { get; set; } - - /// - /// Duration of silence to detect speech stop (in milliseconds). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] - public int? SilenceDurationMs { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection), - jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs index 240ac89ff..99d33fd1f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs @@ -4,7 +4,10 @@ namespace OpenAI { /// - /// Returned when a conversation item is created. + /// Returned when a conversation item is created. There are several scenarios that produce this event:
+ /// - The server is generating a Response, which if successful will produce either one or two Items, which will be of type `message` (role `assistant`) or type `function_call`.
+ /// - The input audio buffer has been committed, either by the client or the server (in `server_vad` mode). The server will take the content of the input audio buffer and add it to a new user message Item.
+ /// - The client has sent a `conversation.item.create` event to add a new Item to the Conversation. ///
public sealed partial class RealtimeServerEventConversationItemCreated { @@ -16,25 +19,29 @@ public sealed partial class RealtimeServerEventConversationItemCreated public required string EventId { get; set; } /// - /// The event type, must be "conversation.item.created". + /// The event type, must be `conversation.item.created`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// The ID of the preceding item. + /// The ID of the preceding item in the Conversation context, allows the client to understand the order of the conversation. /// [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] [global::System.Text.Json.Serialization.JsonRequired] public required string PreviousItemId { get; set; } /// - /// The item that was created. + /// A realtime Item is of three types: message, function_call, or function_call_output.
+ /// A message item can contain text or audio.
+ /// A function_call item indicates a model's desire to call a function, which is the only tool supported for now
+ /// A function_call_output item indicates a function response.
+ /// The client may add and remove message and function_call_output Items using conversation.item.create and conversation.item.delete. ///
[global::System.Text.Json.Serialization.JsonPropertyName("item")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventConversationItemCreatedItem Item { get; set; } + public required global::OpenAI.RealtimeConversationItem Item { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItem.g.cs deleted file mode 100644 index d9cd543e9..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItem.g.cs +++ /dev/null @@ -1,135 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// The item that was created. - /// - public sealed partial class RealtimeServerEventConversationItemCreatedItem - { - /// - /// The unique ID of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.item". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The type of the item ("message", "function_call", "function_call_output"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The status of the item ("completed", "in_progress", "incomplete"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status")] - public string? Status { get; set; } - - /// - /// The role associated with the item ("user", "assistant", "system"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("role")] - public string? Role { get; set; } - - /// - /// The content of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("content")] - public global::System.Collections.Generic.IList? 
Content { get; set; } - - /// - /// The ID of the function call (for "function_call" items). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] - public string? CallId { get; set; } - - /// - /// The name of the function being called. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? Name { get; set; } - - /// - /// The arguments of the function call. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] - public string? Arguments { get; set; } - - /// - /// The output of the function call (for "function_call_output" items). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output")] - public string? Output { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventConversationItemCreatedItem? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventConversationItemCreatedItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemCreatedItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventConversationItemCreatedItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItemContentItem.g.cs deleted file mode 100644 index da57a9438..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItemContentItem.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeServerEventConversationItemCreatedItemContentItem - { - /// - /// The content type ("text", "audio", "input_text", "input_audio"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The text content. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("text")] - public string? Text { get; set; } - - /// - /// Base64-encoded audio data. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("audio")] - public string? Audio { get; set; } - - /// - /// The transcript of the audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] - public string? Transcript { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs index 3e2114c47..554f8112c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when an item in the conversation is deleted. + /// Returned when an item in the conversation is deleted by the client with a `conversation.item.delete` event. This event is used to synchronize the server's understanding of the conversation history with the client's view. /// public sealed partial class RealtimeServerEventConversationItemDeleted { @@ -16,7 +16,7 @@ public sealed partial class RealtimeServerEventConversationItemDeleted public required string EventId { get; set; } /// - /// The event type, must be "conversation.item.deleted". + /// The event type, must be `conversation.item.deleted`. 
/// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs index 7c1a6e872..0e004d08e 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs @@ -4,7 +4,8 @@ namespace OpenAI { /// - /// Returned when input audio transcription is enabled and a transcription succeeds. + /// This event is the output of audio transcription for user audio written to the user audio buffer. Transcription begins when the input audio buffer is committed by the client or server (in `server_vad` mode). Transcription runs asynchronously with Response creation, so this event may come before or after the Response events.
+ /// Realtime API models accept audio natively, and thus input transcription is a separate process run on a separate ASR (Automatic Speech Recognition) model, currently always `whisper-1`. Thus the transcript may diverge somewhat from the model's interpretation, and should be treated as a rough guide. ///
public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionCompleted { @@ -16,14 +17,14 @@ public sealed partial class RealtimeServerEventConversationItemInputAudioTranscr public required string EventId { get; set; } /// - /// The event type, must be "conversation.item.input_audio_transcription.completed". + /// The event type, must be `conversation.item.input_audio_transcription.completed`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// The ID of the user message item. + /// The ID of the user message item containing the audio. /// [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs index 6f792bafa..e8e88bde8 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when input audio transcription is configured, and a transcription request for a user message failed. + /// Returned when input audio transcription is configured, and a transcription request for a user message failed. These events are separate from other `error` events so that the client can identify the related Item. 
/// public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailed { @@ -16,7 +16,7 @@ public sealed partial class RealtimeServerEventConversationItemInputAudioTranscr public required string EventId { get; set; } /// - /// The event type, must be "conversation.item.input_audio_transcription.failed". + /// The event type, must be `conversation.item.input_audio_transcription.failed`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs index 979928a16..866480a73 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs @@ -4,7 +4,8 @@ namespace OpenAI { /// - /// Returned when an earlier assistant audio message item is truncated by the client. + /// Returned when an earlier assistant audio message item is truncated by the client with a `conversation.item.truncate` event. This event is used to synchronize the server's understanding of the audio with the client's playback.
+ /// This action will truncate the audio and remove the server-side text transcript to ensure there is no text in the context that hasn't been heard by the user. ///
public sealed partial class RealtimeServerEventConversationItemTruncated { @@ -16,7 +17,7 @@ public sealed partial class RealtimeServerEventConversationItemTruncated public required string EventId { get; set; } /// - /// The event type, must be "conversation.item.truncated". + /// The event type, must be `conversation.item.truncated`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs index 414906ddd..3eb0c4da1 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when an error occurs. + /// Returned when an error occurs, which could be a client problem or a server problem. Most errors are recoverable and the session will stay open, we recommend to implementors to monitor and log error messages by default. /// public sealed partial class RealtimeServerEventError { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs index b68928cf7..059557a02 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when the input audio buffer is cleared by the client. + /// Returned when the input audio buffer is cleared by the client with a `input_audio_buffer.clear` event. 
/// public sealed partial class RealtimeServerEventInputAudioBufferCleared { @@ -16,7 +16,7 @@ public sealed partial class RealtimeServerEventInputAudioBufferCleared public required string EventId { get; set; } /// - /// The event type, must be "input_audio_buffer.cleared". + /// The event type, must be `input_audio_buffer.cleared`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs index cff9ee9cd..594c31495 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode. + /// Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode. The `item_id` property is the ID of the user message item that will be created, thus a `conversation.item.created` event will also be sent to the client. /// public sealed partial class RealtimeServerEventInputAudioBufferCommitted { @@ -16,7 +16,7 @@ public sealed partial class RealtimeServerEventInputAudioBufferCommitted public required string EventId { get; set; } /// - /// The event type, must be "input_audio_buffer.committed". + /// The event type, must be `input_audio_buffer.committed`. 
/// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs index b573d5e41..a3105b446 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned in server turn detection mode when speech is detected. + /// Sent by the server when in `server_vad` mode to indicate that speech has been detected in the audio buffer. This can happen any time audio is added to the buffer (unless speech is already detected). The client may want to use this event to interrupt audio playback or provide visual feedback to the user. The client should expect to receive a `input_audio_buffer.speech_stopped` event when speech stops. The `item_id` property is the ID of the user message item that will be created when speech stops and will also be included in the `input_audio_buffer.speech_stopped` event (unless the client manually commits the audio buffer during VAD activation). /// public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted { @@ -16,14 +16,14 @@ public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted public required string EventId { get; set; } /// - /// The event type, must be "input_audio_buffer.speech_started". + /// The event type, must be `input_audio_buffer.speech_started`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// Milliseconds since the session started when speech was detected. 
+ /// Milliseconds from the start of all audio written to the buffer during the session when speech was first detected. This will correspond to the beginning of audio sent to the model, and thus includes the `prefix_padding_ms` configured in the Session. /// [global::System.Text.Json.Serialization.JsonPropertyName("audio_start_ms")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs index ecdac70b3..3beaee590 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned in server turn detection mode when speech stops. + /// Returned in `server_vad` mode when the server detects the end of speech in the audio buffer. The server will also send an `conversation.item.created` event with the user message item that is created from the audio buffer. /// public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped { @@ -16,14 +16,14 @@ public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped public required string EventId { get; set; } /// - /// The event type, must be "input_audio_buffer.speech_stopped". + /// The event type, must be `input_audio_buffer.speech_stopped`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// Milliseconds since the session started when speech stopped. + /// Milliseconds since the session started when speech stopped. This will correspond to the end of audio sent to the model, and thus includes the `min_silence_duration_ms` configured in the Session. 
/// [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs index ad6c536c3..8414f2d31 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Emitted after every "response.done" event to indicate the updated rate limits. + /// Emitted at the beginning of a Response to indicate the updated rate limits. When a Response is created some tokens will be "reserved" for the output tokens, the rate limits shown here reflect that reservation, which is then adjusted accordingly once the Response is completed. /// public sealed partial class RealtimeServerEventRateLimitsUpdated { @@ -16,7 +16,7 @@ public sealed partial class RealtimeServerEventRateLimitsUpdated public required string EventId { get; set; } /// - /// The event type, must be "rate_limits.updated". + /// The event type, must be `rate_limits.updated`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs index a0898d0ff..a6bf5b9e2 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs @@ -9,7 +9,7 @@ namespace OpenAI public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit { /// - /// The name of the rate limit ("requests", "tokens", "input_tokens", "output_tokens"). 
+ /// The name of the rate limit (`requests`, `tokens`). /// [global::System.Text.Json.Serialization.JsonPropertyName("name")] public string? Name { get; set; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs index 75cf59d55..08de7b3dd 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when a new Response is created. The first event of response creation, where the response is in an initial state of "in_progress". + /// Returned when a new Response is created. The first event of response creation, where the response is in an initial state of `in_progress`. /// public sealed partial class RealtimeServerEventResponseCreated { @@ -16,7 +16,7 @@ public sealed partial class RealtimeServerEventResponseCreated public required string EventId { get; set; } /// - /// The event type, must be "response.created". + /// The event type, must be `response.created`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] @@ -27,7 +27,7 @@ public sealed partial class RealtimeServerEventResponseCreated ///
[global::System.Text.Json.Serialization.JsonPropertyName("response")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventResponseCreatedResponse Response { get; set; } + public required global::OpenAI.RealtimeResponse Response { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponse.g.cs deleted file mode 100644 index c1937839f..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponse.g.cs +++ /dev/null @@ -1,111 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// The response resource. - /// - public sealed partial class RealtimeServerEventResponseCreatedResponse - { - /// - /// The unique ID of the response. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.response". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The status of the response ("in_progress"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status")] - public string? Status { get; set; } - - /// - /// Additional details about the status. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status_details")] - public object? StatusDetails { get; set; } - - /// - /// The list of output items generated by the response. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output")] - public global::System.Collections.Generic.IList? Output { get; set; } - - /// - /// Usage statistics for the response. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("usage")] - public object? 
Usage { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseCreatedResponse? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponse), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponse; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseCreatedResponse? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseOutputItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseOutputItem.g.cs deleted file mode 100644 index 580154acf..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseOutputItem.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// An item in the response output. 
- /// - public sealed partial class RealtimeServerEventResponseCreatedResponseOutputItem - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseStatusDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseStatusDetails.g.cs deleted file mode 100644 index ec7ec927e..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseStatusDetails.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Additional details about the status. 
- /// - public sealed partial class RealtimeServerEventResponseCreatedResponseStatusDetails - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseUsage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseUsage.g.cs deleted file mode 100644 index 6bb8a68cc..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseUsage.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Usage statistics for the response. 
- /// - public sealed partial class RealtimeServerEventResponseCreatedResponseUsage - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs index fdbebe01b..ab062d9a8 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when a Response is done streaming. Always emitted, no matter the final state. + /// Returned when a Response is done streaming. Always emitted, no matter the final state. 
The Response object included in the `response.done` event will include all output Items in the Response but will omit the raw audio data. /// public sealed partial class RealtimeServerEventResponseDone { @@ -27,7 +27,7 @@ public sealed partial class RealtimeServerEventResponseDone /// [global::System.Text.Json.Serialization.JsonPropertyName("response")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventResponseDoneResponse Response { get; set; } + public required global::OpenAI.RealtimeResponse Response { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponse.g.cs deleted file mode 100644 index 3079da207..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponse.g.cs +++ /dev/null @@ -1,111 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// The response resource. - /// - public sealed partial class RealtimeServerEventResponseDoneResponse - { - /// - /// The unique ID of the response. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.response". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The final status of the response ("completed", "cancelled", "failed", "incomplete"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status")] - public string? Status { get; set; } - - /// - /// Additional details about the status. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status_details")] - public object? StatusDetails { get; set; } - - /// - /// The list of output items generated by the response. 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("output")] - public global::System.Collections.Generic.IList? Output { get; set; } - - /// - /// Usage statistics for the response. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("usage")] - public object? Usage { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. 
- /// - public static global::OpenAI.RealtimeServerEventResponseDoneResponse? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseDoneResponse), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponse; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseDoneResponse? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseOutputItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseOutputItem.g.cs deleted file mode 100644 index 26f64b700..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseOutputItem.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// An item in the response output. 
- /// - public sealed partial class RealtimeServerEventResponseDoneResponseOutputItem - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseStatusDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseStatusDetails.g.cs deleted file mode 100644 index c89e87153..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseStatusDetails.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Additional details about the status. 
- /// - public sealed partial class RealtimeServerEventResponseDoneResponseStatusDetails - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseUsage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseUsage.g.cs deleted file mode 100644 index dca3c469a..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseUsage.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Usage statistics for the response. 
- /// - public sealed partial class RealtimeServerEventResponseDoneResponseUsage - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseDoneResponseUsage? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseDoneResponseUsage), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponseUsage; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseDoneResponseUsage? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs index 72d21ffbb..8ff6b99fa 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when a new Item is created during response generation. + /// Returned when a new Item is created during Response generation. 
/// public sealed partial class RealtimeServerEventResponseOutputItemAdded { @@ -16,32 +16,36 @@ public sealed partial class RealtimeServerEventResponseOutputItemAdded public required string EventId { get; set; } /// - /// The event type, must be "response.output_item.added". + /// The event type, must be `response.output_item.added`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// The ID of the response to which the item belongs. + /// The ID of the Response to which the item belongs. /// [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] [global::System.Text.Json.Serialization.JsonRequired] public required string ResponseId { get; set; } /// - /// The index of the output item in the response. + /// The index of the output item in the Response. /// [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] [global::System.Text.Json.Serialization.JsonRequired] public required int OutputIndex { get; set; } /// - /// The item that was added. + /// A realtime Item is of three types: message, function_call, or function_call_output.
+ /// A message item can contain text or audio.
+ /// A function_call item indicates a model's desire to call a function, which is the only tool supported for now
+ /// A function_call_output item indicates a function response.
+ /// The client may add and remove message and function_call_output Items using conversation.item.create and conversation.item.delete. ///
[global::System.Text.Json.Serialization.JsonPropertyName("item")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem Item { get; set; } + public required global::OpenAI.RealtimeConversationItem Item { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItem.g.cs deleted file mode 100644 index 2277f59a6..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItem.g.cs +++ /dev/null @@ -1,111 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// The item that was added. - /// - public sealed partial class RealtimeServerEventResponseOutputItemAddedItem - { - /// - /// The unique ID of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.item". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The type of the item ("message", "function_call", "function_call_output"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The status of the item ("in_progress", "completed"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status")] - public string? Status { get; set; } - - /// - /// The role associated with the item ("assistant"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("role")] - public string? Role { get; set; } - - /// - /// The content of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("content")] - public global::System.Collections.Generic.IList? 
Content { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItemContentItem.g.cs deleted file mode 100644 index 099c74d36..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItemContentItem.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeServerEventResponseOutputItemAddedItemContentItem - { - /// - /// The content type ("text", "audio"). 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The text content. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("text")] - public string? Text { get; set; } - - /// - /// Base64-encoded audio data. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("audio")] - public string? Audio { get; set; } - - /// - /// The transcript of the audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] - public string? Transcript { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs index d124824e1..7f1c53e21 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs @@ -16,32 +16,36 @@ public sealed partial class RealtimeServerEventResponseOutputItemDone public required string EventId { get; set; } /// - /// The event type, must be "response.output_item.done". + /// The event type, must be `response.output_item.done`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// The ID of the response to which the item belongs. + /// The ID of the Response to which the item belongs. /// [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] [global::System.Text.Json.Serialization.JsonRequired] public required string ResponseId { get; set; } /// - /// The index of the output item in the response. + /// The index of the output item in the Response. /// [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] [global::System.Text.Json.Serialization.JsonRequired] public required int OutputIndex { get; set; } /// - /// The completed item. + /// A realtime Item is of three types: message, function_call, or function_call_output.
+ /// A message item can contain text or audio.
+ /// A function_call item indicates a model's desire to call a function, which is the only tool supported for now
+ /// A function_call_output item indicates a function response.
+ /// The client may add and remove message and function_call_output Items using conversation.item.create and conversation.item.delete. ///
[global::System.Text.Json.Serialization.JsonPropertyName("item")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem Item { get; set; } + public required global::OpenAI.RealtimeConversationItem Item { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItem.g.cs deleted file mode 100644 index bbc16c781..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItem.g.cs +++ /dev/null @@ -1,111 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// The completed item. - /// - public sealed partial class RealtimeServerEventResponseOutputItemDoneItem - { - /// - /// The unique ID of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.item". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The type of the item ("message", "function_call", "function_call_output"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The final status of the item ("completed", "incomplete"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("status")] - public string? Status { get; set; } - - /// - /// The role associated with the item ("assistant"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("role")] - public string? Role { get; set; } - - /// - /// The content of the item. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("content")] - public global::System.Collections.Generic.IList? 
Content { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItemContentItem.g.cs deleted file mode 100644 index be0658706..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItemContentItem.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeServerEventResponseOutputItemDoneItemContentItem - { - /// - /// The content type ("text", "audio"). 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The text content. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("text")] - public string? Text { get; set; } - - /// - /// Base64-encoded audio data. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("audio")] - public string? Audio { get; set; } - - /// - /// The transcript of the audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] - public string? Transcript { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs index 4c57bdda0..7c6cc062a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when a session is created. Emitted automatically when a new connection is established. + /// Returned when a Session is created. Emitted automatically when a new connection is established as the first server event. This event will contain the default Session configuration. /// public sealed partial class RealtimeServerEventSessionCreated { @@ -16,18 +16,23 @@ public sealed partial class RealtimeServerEventSessionCreated public required string EventId { get; set; } /// - /// The event type, must be "session.created". + /// The event type, must be `session.created`. /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] [global::System.Text.Json.Serialization.JsonRequired] public required string Type { get; set; } /// - /// The session resource. + /// A session refers to a single WebSocket connection between a client and the server.
+ /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
+ /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
+ /// and function calls (if functions are provided by the client).
+ /// A realtime Session represents the overall client-server interaction, and contains default configuration.
+ /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). ///
[global::System.Text.Json.Serialization.JsonPropertyName("session")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventSessionCreatedSession Session { get; set; } + public required global::OpenAI.RealtimeSession Session { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSession.g.cs deleted file mode 100644 index bb9920a03..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSession.g.cs +++ /dev/null @@ -1,162 +0,0 @@ - -#pragma warning disable CS0618 // Type or member is obsolete - -#nullable enable - -namespace OpenAI -{ - /// - /// The session resource. - /// - public sealed partial class RealtimeServerEventSessionCreatedSession - { - /// - /// The unique ID of the session. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.session". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The default model used for this session. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("model")] - public string? Model { get; set; } - - /// - /// The set of modalities the model can respond with. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] - public global::System.Collections.Generic.IList? Modalities { get; set; } - - /// - /// The default system instructions. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] - public string? Instructions { get; set; } - - /// - /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("voice")] - public string? Voice { get; set; } - - /// - /// The format of input audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] - public string? InputAudioFormat { get; set; } - - /// - /// The format of output audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] - public string? OutputAudioFormat { get; set; } - - /// - /// Configuration for input audio transcription. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] - public global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? InputAudioTranscription { get; set; } - - /// - /// Configuration for turn detection. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] - public global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? TurnDetection { get; set; } - - /// - /// Tools (functions) available to the model. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } - - /// - /// How the model chooses tools. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] - public string? ToolChoice { get; set; } - - /// - /// Sampling temperature. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] - public double? Temperature { get; set; } - - /// - /// Maximum number of output tokens. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] - public global::OpenAI.OneOf? 
MaxOutputTokens { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionCreatedSession? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionCreatedSession), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSession; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionCreatedSession? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionInputAudioTranscription.g.cs deleted file mode 100644 index 7ef7d82b2..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionInputAudioTranscription.g.cs +++ /dev/null @@ -1,87 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for input audio transcription. 
- /// - public sealed partial class RealtimeServerEventSessionCreatedSessionInputAudioTranscription - { - /// - /// Whether input audio transcription is enabled. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("enabled")] - public bool? Enabled { get; set; } - - /// - /// The model used for transcription. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("model")] - public string? Model { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs deleted file mode 100644 index 0c67b699b..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs +++ /dev/null @@ -1,45 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public enum RealtimeServerEventSessionCreatedSessionMaxOutputTokens - { - /// - /// - /// - Inf, - } - - /// - /// Enum extensions to do fast conversions without the reflection. - /// - public static class RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions - { - /// - /// Converts an enum to a string. - /// - public static string ToValueString(this RealtimeServerEventSessionCreatedSessionMaxOutputTokens value) - { - return value switch - { - RealtimeServerEventSessionCreatedSessionMaxOutputTokens.Inf => "inf", - _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), - }; - } - /// - /// Converts an string to a enum. - /// - public static RealtimeServerEventSessionCreatedSessionMaxOutputTokens? 
ToEnum(string value) - { - return value switch - { - "inf" => RealtimeServerEventSessionCreatedSessionMaxOutputTokens.Inf, - _ => null, - }; - } - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTool.g.cs deleted file mode 100644 index f21e87d87..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTool.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeServerEventSessionCreatedSessionTool - { - /// - /// The type of the tool. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The name of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? Name { get; set; } - - /// - /// The description of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("description")] - public string? Description { get; set; } - - /// - /// Parameters of the function in JSON Schema. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] - public object? Parameters { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTool? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionTool), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionTool; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTool? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionToolParameters.g.cs deleted file mode 100644 index 7e058a80f..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionToolParameters.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Parameters of the function in JSON Schema. - /// - public sealed partial class RealtimeServerEventSessionCreatedSessionToolParameters - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTurnDetection.g.cs deleted file mode 100644 index f2e77640c..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTurnDetection.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for turn detection. - /// - public sealed partial class RealtimeServerEventSessionCreatedSessionTurnDetection - { - /// - /// The type of turn detection ("server_vad" or "none"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// Activation threshold for VAD. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] - public double? Threshold { get; set; } - - /// - /// Audio included before speech starts (in milliseconds). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] - public int? PrefixPaddingMs { get; set; } - - /// - /// Duration of silence to detect speech stop (in milliseconds). 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] - public int? SilenceDurationMs { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs index 9c0a54a90..ad96db9d4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// Returned when a session is updated. + /// Returned when a session is updated with a `session.update` event, unless there is an error. 
/// public sealed partial class RealtimeServerEventSessionUpdated { @@ -23,11 +23,16 @@ public sealed partial class RealtimeServerEventSessionUpdated public required string Type { get; set; } /// - /// The updated session resource. + /// A session refers to a single WebSocket connection between a client and the server.
+ /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
+ /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
+ /// and function calls (if functions are provided by the client).
+ /// A realtime Session represents the overall client-server interaction, and contains default configuration.
+ /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). ///
[global::System.Text.Json.Serialization.JsonPropertyName("session")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeServerEventSessionUpdatedSession Session { get; set; } + public required global::OpenAI.RealtimeSession Session { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSession.g.cs deleted file mode 100644 index 798638fe0..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSession.g.cs +++ /dev/null @@ -1,162 +0,0 @@ - -#pragma warning disable CS0618 // Type or member is obsolete - -#nullable enable - -namespace OpenAI -{ - /// - /// The updated session resource. - /// - public sealed partial class RealtimeServerEventSessionUpdatedSession - { - /// - /// The unique ID of the session. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("id")] - public string? Id { get; set; } - - /// - /// The object type, must be "realtime.session". - /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - public string? Object { get; set; } - - /// - /// The default model used for this session. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("model")] - public string? Model { get; set; } - - /// - /// The set of modalities the model can respond with. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] - public global::System.Collections.Generic.IList? Modalities { get; set; } - - /// - /// The default system instructions. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] - public string? Instructions { get; set; } - - /// - /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("voice")] - public string? Voice { get; set; } - - /// - /// The format of input audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] - public string? InputAudioFormat { get; set; } - - /// - /// The format of output audio. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] - public string? OutputAudioFormat { get; set; } - - /// - /// Configuration for input audio transcription. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] - public global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? InputAudioTranscription { get; set; } - - /// - /// Configuration for turn detection. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] - public global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? TurnDetection { get; set; } - - /// - /// Tools (functions) available to the model. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } - - /// - /// How the model chooses tools. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] - public string? ToolChoice { get; set; } - - /// - /// Sampling temperature. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] - public double? Temperature { get; set; } - - /// - /// Maximum number of output tokens. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] - public global::OpenAI.OneOf? 
MaxOutputTokens { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionUpdatedSession? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSession), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSession; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionUpdatedSession? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription.g.cs deleted file mode 100644 index d6183673d..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription.g.cs +++ /dev/null @@ -1,87 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for input audio transcription. 
- /// - public sealed partial class RealtimeServerEventSessionUpdatedSessionInputAudioTranscription - { - /// - /// Whether input audio transcription is enabled. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("enabled")] - public bool? Enabled { get; set; } - - /// - /// The model used for transcription. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("model")] - public string? Model { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs deleted file mode 100644 index 8736863dd..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs +++ /dev/null @@ -1,45 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public enum RealtimeServerEventSessionUpdatedSessionMaxOutputTokens - { - /// - /// - /// - Inf, - } - - /// - /// Enum extensions to do fast conversions without the reflection. - /// - public static class RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions - { - /// - /// Converts an enum to a string. - /// - public static string ToValueString(this RealtimeServerEventSessionUpdatedSessionMaxOutputTokens value) - { - return value switch - { - RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.Inf => "inf", - _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), - }; - } - /// - /// Converts an string to a enum. - /// - public static RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? 
ToEnum(string value) - { - return value switch - { - "inf" => RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.Inf, - _ => null, - }; - } - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTool.g.cs deleted file mode 100644 index d16bfaa8c..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTool.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// - /// - public sealed partial class RealtimeServerEventSessionUpdatedSessionTool - { - /// - /// The type of the tool. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// The name of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? Name { get; set; } - - /// - /// The description of the function. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("description")] - public string? Description { get; set; } - - /// - /// Parameters of the function in JSON Schema. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] - public object? Parameters { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionToolParameters.g.cs deleted file mode 100644 index ec0888bca..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionToolParameters.g.cs +++ /dev/null @@ -1,76 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Parameters of the function in JSON Schema. - /// - public sealed partial class RealtimeServerEventSessionUpdatedSessionToolParameters - { - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
- /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters? FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTurnDetection.g.cs deleted file mode 100644 index 3c150aa84..000000000 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTurnDetection.g.cs +++ /dev/null @@ -1,99 +0,0 @@ - -#nullable enable - -namespace OpenAI -{ - /// - /// Configuration for turn detection. - /// - public sealed partial class RealtimeServerEventSessionUpdatedSessionTurnDetection - { - /// - /// The type of turn detection ("server_vad" or "none"). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } - - /// - /// Activation threshold for VAD. - /// - [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] - public double? Threshold { get; set; } - - /// - /// Audio included before speech starts (in milliseconds). - /// - [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] - public int? PrefixPaddingMs { get; set; } - - /// - /// Duration of silence to detect speech stop (in milliseconds). 
- /// - [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] - public int? SilenceDurationMs { get; set; } - - /// - /// Additional properties that are not explicitly defined in the schema - /// - [global::System.Text.Json.Serialization.JsonExtensionData] - public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); - - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. - /// - public string ToJson( - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - this.GetType(), - jsonSerializerContext); - } - - /// - /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public string ToJson( - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Serialize( - this, - jsonSerializerOptions); - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerContext. - /// - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? 
FromJson( - string json, - global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection), - jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection; - } - - /// - /// Deserializes a JSON string using the provided JsonSerializerOptions. - /// -#if NET8_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] - [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] -#endif - public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? FromJson( - string json, - global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) - { - return global::System.Text.Json.JsonSerializer.Deserialize( - json, - jsonSerializerOptions); - } - - } -} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs index 48bd99556..f1c1c9c95 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs @@ -6,7 +6,7 @@ namespace OpenAI { /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RunObject.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RunObject.g.cs index a2b8b0943..831c8b52b 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RunObject.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RunObject.g.cs @@ -194,14 +194,14 @@ public sealed partial class RunObject public required global::OpenAI.AssistantsApiToolChoiceOption ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] [global::System.Text.Json.Serialization.JsonRequired] public required bool? ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. diff --git a/src/libs/OpenAI/Generated/OpenAI.ModerationsClient.CreateModeration.g.cs b/src/libs/OpenAI/Generated/OpenAI.ModerationsClient.CreateModeration.g.cs index b174365c4..d536f3b4a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ModerationsClient.CreateModeration.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ModerationsClient.CreateModeration.g.cs @@ -126,7 +126,7 @@ partial void ProcessCreateModerationResponseContent( /// /// The content moderation model you would like to use. Learn more in
/// [the moderation guide](/docs/guides/moderation), and learn about
- /// available models [here](/docs/models/moderation).
+ /// available models [here](/docs/models#moderation).
/// Default Value: omni-moderation-latest
/// Example: omni-moderation-2024-09-26 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.UploadsClient.CreateUpload.g.cs b/src/libs/OpenAI/Generated/OpenAI.UploadsClient.CreateUpload.g.cs index 90045987f..fe44fa16c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.UploadsClient.CreateUpload.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.UploadsClient.CreateUpload.g.cs @@ -25,7 +25,7 @@ partial void ProcessCreateUploadResponseContent( /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). ///
/// @@ -122,7 +122,7 @@ partial void ProcessCreateUploadResponseContent( /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). ///
/// diff --git a/src/libs/OpenAI/openapi.yaml b/src/libs/OpenAI/openapi.yaml index 6dc8ca21e..331febdfa 100644 --- a/src/libs/OpenAI/openapi.yaml +++ b/src/libs/OpenAI/openapi.yaml @@ -42,7 +42,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: @@ -484,13 +484,13 @@ paths: curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 
1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" - title: Image input request: curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n 
\"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n }\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this image?\" },\n {\n type: \"image_url\",\n image_url: {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n }\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n 
\"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" - title: Streaming request: curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" @@ -502,13 +502,13 @@ paths: curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + response: "{\n \"id\": 
\"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" - title: Logprobs request: curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 
117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 
117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n" /completions: post: tags: @@ -579,7 +579,7 @@ paths: get: tags: - Files - summary: Returns a list of files that belong to the user's organization. + summary: Returns a list of files. operationId: listFiles parameters: - name: purpose @@ -587,6 +587,26 @@ paths: description: Only return files with the given purpose. schema: type: string + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.\n" + schema: + type: integer + default: 10000 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. 
`after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string responses: '200': description: OK @@ -1225,7 +1245,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: @@ -2353,7 +2373,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string - name: run_id @@ -2565,7 +2585,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: @@ -2600,7 +2620,7 @@ paths: type: string - name: 'include[]' in: query - description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" schema: type: array items: @@ -2801,12 +2821,12 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string - name: 'include[]' in: query - description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" schema: type: array items: @@ -2858,7 +2878,7 @@ paths: type: string - name: 'include[]' in: query - description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" schema: type: array items: @@ -2937,7 +2957,7 @@ paths: post: tags: - Uploads - summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. 
Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search/supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" + summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search#supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" operationId: createUpload requestBody: content: @@ -3088,7 +3108,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. 
`before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: @@ -3390,7 +3410,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string - name: filter @@ -3456,7 +3476,7 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string - name: filter @@ -3648,7 +3668,7 @@ components: nullable: true model: type: string - description: "ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" instructions: maxLength: 256000 type: string @@ -3756,7 +3776,7 @@ components: maximum: 50 minimum: 1 type: integer - description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" ranking_options: $ref: '#/components/schemas/FileSearchRankingOptions' description: Overrides for the file search tool. @@ -3794,7 +3814,7 @@ components: - $ref: '#/components/schemas/ResponseFormatText' - $ref: '#/components/schemas/ResponseFormatJsonObject' - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "Specifies the format that the model must output. 
Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" + description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" x-oaiExpandable: true AssistantsApiToolChoiceOption: oneOf: @@ -4630,7 +4650,7 @@ components: - low - high type: string - description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).' + description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).' default: auto description: "Learn about [image inputs](/docs/guides/vision).\n" ChatCompletionRequestMessageContentPartRefusal: @@ -4978,12 +4998,18 @@ components: completion_tokens_details: type: object properties: + accepted_prediction_tokens: + type: integer + description: "When using Predicted Outputs, the number of tokens in the\nprediction that appeared in the completion.\n" audio_tokens: type: integer description: Audio input tokens generated by the model. reasoning_tokens: type: integer description: Tokens generated by the model for reasoning. + rejected_prediction_tokens: + type: integer + description: "When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context window\nlimits.\n" description: Breakdown of tokens used in a completion. prompt_tokens_details: type: object @@ -5029,7 +5055,7 @@ components: - gpt-3.5-turbo-0125 - gpt-3.5-turbo-16k-0613 type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + description: "ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" example: gpt-4o x-oaiTypeLabel: string name: @@ -5226,14 +5252,14 @@ components: x-oaiMeta: name: The chat completion object group: chat - example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" CreateChatCompletionImageResponse: type: object description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' 
x-oaiMeta: name: The chat completion chunk object group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" CreateChatCompletionRequest: required: - model @@ -5284,7 +5310,7 @@ components: - gpt-3.5-turbo-0125 - gpt-3.5-turbo-16k-0613 type: string - description: 'ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.' + description: 'ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.' example: gpt-4o x-oaiTypeLabel: string store: @@ -5302,7 +5328,7 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" default: 0 nullable: true logit_bias: @@ -5343,6 +5369,12 @@ components: example: 1 modalities: $ref: '#/components/schemas/ChatCompletionModalities' + prediction: + oneOf: + - $ref: '#/components/schemas/PredictionContent' + description: "Configuration for a [Predicted Output](/docs/guides/latency-optimization#use-predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.\n" + nullable: true + x-oaiExpandable: true audio: required: - voice @@ -5352,13 +5384,15 @@ components: voice: enum: - alloy + - ash + - ballad + - coral - echo - - fable - - onyx - - nova + - sage - shimmer + - verse type: string - description: "Specifies the voice type. Supported voices are `alloy`, `echo`, \n`fable`, `onyx`, `nova`, and `shimmer`.\n" + description: "The voice the model uses to respond. Supported voices are `alloy`,\n`ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.\n" format: enum: - wav @@ -5367,7 +5401,7 @@ components: - opus - pcm16 type: string - description: "Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`. \n" + description: "Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`.\n" description: "Parameters for audio output. 
Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](/docs/guides/audio).\n" nullable: true x-oaiExpandable: true @@ -5375,7 +5409,7 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" default: 0 nullable: true response_format: @@ -5383,7 +5417,7 @@ components: - $ref: '#/components/schemas/ResponseFormatText' - $ref: '#/components/schemas/ResponseFormatJsonObject' - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" + description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" x-oaiExpandable: true seed: maximum: 9223372036854776000 @@ -5398,7 +5432,7 @@ components: - auto - default type: string - description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. 
\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" + description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" default: auto nullable: true stop: @@ -5446,7 +5480,7 @@ components: $ref: '#/components/schemas/ParallelToolCalls' user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 function_call: oneOf: @@ -5551,7 +5585,7 @@ components: x-oaiMeta: name: The chat completion object group: chat - example: "{\n \"id\": \"chatcmpl-123456\",\n \"object\": \"chat.completion\",\n \"created\": 1728933352,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hi there! How can I assist you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 19,\n \"completion_tokens\": 10,\n \"total_tokens\": 29,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_6b68a8204b\"\n}\n" + example: "{\n \"id\": \"chatcmpl-123456\",\n \"object\": \"chat.completion\",\n \"created\": 1728933352,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hi there! How can I assist you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 19,\n \"completion_tokens\": 10,\n \"total_tokens\": 29,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_6b68a8204b\"\n}\n" CreateChatCompletionStreamResponse: required: - choices @@ -5648,6 +5682,7 @@ components: type: integer description: Total number of tokens used in the request (prompt + completion). 
description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" + nullable: true description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' x-oaiMeta: name: The chat completion chunk object @@ -5667,7 +5702,7 @@ components: - davinci-002 - babbage-002 type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" x-oaiTypeLabel: string prompt: oneOf: @@ -5711,7 +5746,7 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" default: 0 nullable: true logit_bias: @@ -5748,7 +5783,7 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" default: 0 nullable: true seed: @@ -5803,7 +5838,7 @@ components: example: 1 user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 CreateCompletionResponse: required: @@ -5934,7 +5969,7 @@ components: - text-embedding-3-small - text-embedding-3-large type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" example: text-embedding-3-small x-oaiTypeLabel: string encoding_format: @@ -5952,7 +5987,7 @@ components: nullable: true user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 additionalProperties: false CreateEmbeddingResponse: @@ -6023,7 +6058,7 @@ components: - gpt-3.5-turbo - gpt-4o-mini type: string - description: "The name of the model to fine-tune. You can select one of the\n[supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).\n" + description: "The name of the model to fine-tune. You can select one of the\n[supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).\n" example: gpt-4o-mini x-oaiTypeLabel: string training_file: @@ -6181,7 +6216,7 @@ components: example: url user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 CreateImageRequest: required: @@ -6252,7 +6287,7 @@ components: example: vivid user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 CreateImageVariationRequest: required: @@ -6303,7 +6338,7 @@ components: example: 1024x1024 user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 CreateMessageRequest: required: @@ -6331,7 +6366,7 @@ components: - $ref: '#/components/schemas/MessageContentImageUrlObject' - $ref: '#/components/schemas/MessageRequestContentTextObject' x-oaiExpandable: true - description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview).' + description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models).' x-oaiExpandable: true attachments: required: @@ -6430,7 +6465,7 @@ components: - text-moderation-latest - text-moderation-stable type: string - description: "The content moderation model you would like to use. Learn more in\n[the moderation guide](/docs/guides/moderation), and learn about\navailable models [here](/docs/models/moderation).\n" + description: "The content moderation model you would like to use. Learn more in\n[the moderation guide](/docs/guides/moderation), and learn about\navailable models [here](/docs/models#moderation).\n" default: omni-moderation-latest example: omni-moderation-2024-09-26 x-oaiTypeLabel: string @@ -6817,7 +6852,7 @@ components: - tts-1 - tts-1-hd type: string - description: "One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`\n" + description: "One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd`\n" x-oaiTypeLabel: string input: maxLength: 4096 @@ -6832,7 +6867,7 @@ components: - nova - shimmer type: string - description: 'The voice to use when generating the audio. 
Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).' + description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options).' response_format: enum: - mp3 @@ -7103,7 +7138,7 @@ components: description: "The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n" prompt: type: string - description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.\n" + description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language.\n" response_format: $ref: '#/components/schemas/AudioResponseFormat' temperature: @@ -7187,7 +7222,7 @@ components: x-oaiTypeLabel: string prompt: type: string - description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.\n" + description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English.\n" response_format: $ref: '#/components/schemas/AudioResponseFormat' temperature: @@ -7486,9 +7521,9 @@ components: type: string data: $ref: '#/components/schemas/Error' - description: 'Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.' + description: 'Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. 
This can happen due to an internal server error or a timeout.' x-oaiMeta: - dataDescription: '`data` is an [error](/docs/guides/error-codes/api-errors)' + dataDescription: '`data` is an [error](/docs/guides/error-codes#api-errors)' ErrorResponse: required: - error @@ -7513,7 +7548,7 @@ components: minimum: 0 type: number description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - description: "The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + description: "The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" FineTuneChatCompletionRequestAssistantMessage: required: - role @@ -8052,16 +8087,27 @@ components: required: - object - data + - first_id + - last_id + - has_more type: object properties: + object: + type: string + example: list data: type: array items: $ref: '#/components/schemas/OpenAIFile' - object: - enum: - - list + first_id: + type: string + example: file-abc123 + last_id: type: string + example: file-abc456 + has_more: + type: boolean + example: false ListFineTuningJobCheckpointsResponse: required: - object @@ -8889,7 +8935,7 @@ components: model: anyOf: - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + description: "ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" name: maxLength: 256 type: string @@ -9082,8 +9128,34 @@ components: description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' ParallelToolCalls: type: boolean - description: 'Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.' + description: 'Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.' nullable: true + PredictionContent: + title: Static Content + required: + - type + - content + type: object + properties: + type: + enum: + - content + type: string + description: "The type of the predicted content you want to provide. This type is\ncurrently always `content`.\n" + content: + oneOf: + - title: Text content + type: string + description: "The content used for a Predicted Output. This is often the\ntext of a file you are regenerating with minor changes.\n" + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + description: 'An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs.' 
+ description: "The content that should be matched when generating a model response.\nIf generated tokens would match this content, the entire model response\ncan be returned much more quickly.\n" + x-oaiExpandable: true + description: "Static predicted output content, such as the content of a text file that is\nbeing regenerated.\n" Project: required: - id @@ -9480,61 +9552,17 @@ components: description: Optional client-generated ID used to identify this event. type: type: string - description: 'The event type, must be "conversation.item.create".' + description: 'The event type, must be `conversation.item.create`.' previous_item_id: type: string - description: The ID of the preceding item after which the new item will be inserted. + description: 'The ID of the preceding item after which the new item will be inserted. If not set, the new item will be appended to the end of the conversation. If set, it allows an item to be inserted mid-conversation. If the ID cannot be found, an error will be returned and the item will not be added.' item: - type: object - properties: - id: - type: string - description: The unique ID of the item. - type: - type: string - description: 'The type of the item ("message", "function_call", "function_call_output").' - status: - type: string - description: 'The status of the item ("completed", "in_progress", "incomplete").' - role: - type: string - description: 'The role of the message sender ("user", "assistant", "system").' - content: - type: array - items: - type: object - properties: - type: - type: string - description: 'The content type ("input_text", "input_audio", "text", "audio").' - text: - type: string - description: The text content. - audio: - type: string - description: Base64-encoded audio bytes. - transcript: - type: string - description: The transcript of the audio. - description: The content of the message. - call_id: - type: string - description: The ID of the function call (for "function_call" items). 
- name: - type: string - description: The name of the function being called (for "function_call" items). - arguments: - type: string - description: The arguments of the function call (for "function_call" items). - output: - type: string - description: The output of the function call (for "function_call_output" items). - description: The item to add to the conversation. - description: Send this event when adding an item to the conversation. + $ref: '#/components/schemas/RealtimeConversationItem' + description: "Add a new Item to the Conversation's context, including messages, function calls, and function call responses. This event can be used both to populate a \"history\" of the conversation and to add new items mid-stream, but has the current limitation that it cannot populate assistant audio messages.\nIf successful, the server will respond with a `conversation.item.created` event, otherwise an `error` event will be sent." x-oaiMeta: name: conversation.item.create group: realtime - example: "{\n \"event_id\": \"event_345\",\n \"type\": \"conversation.item.create\",\n \"previous_item_id\": null,\n \"item\": {\n \"id\": \"msg_001\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_text\",\n \"text\": \"Hello, how are you?\"\n }\n ]\n }\n}\n" + example: "{\n \"event_id\": \"event_345\",\n \"type\": \"conversation.item.create\",\n \"previous_item_id\": null,\n \"item\": {\n \"id\": \"msg_001\",\n \"type\": \"message\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_text\",\n \"text\": \"Hello, how are you?\"\n }\n ]\n }\n}\n" RealtimeClientEventConversationItemDelete: required: - type @@ -9550,7 +9578,7 @@ components: item_id: type: string description: The ID of the item to delete. - description: Send this event when you want to remove any item from the conversation history. + description: 'Send this event when you want to remove any item from the conversation history. 
The server will respond with a `conversation.item.deleted` event, unless the item does not exist in the conversation history, in which case the server will respond with an error.' x-oaiMeta: name: conversation.item.delete group: realtime @@ -9571,14 +9599,14 @@ components: description: 'The event type, must be "conversation.item.truncate".' item_id: type: string - description: The ID of the assistant message item to truncate. + description: The ID of the assistant message item to truncate. Only assistant message items can be truncated. content_index: type: integer - description: The index of the content part to truncate. + description: The index of the content part to truncate. Set this to 0. audio_end_ms: type: integer - description: 'Inclusive duration up to which audio is truncated, in milliseconds.' - description: Send this event when you want to truncate a previous assistant message’s audio. + description: 'Inclusive duration up to which audio is truncated, in milliseconds. If the audio_end_ms is greater than the actual audio duration, the server will respond with an error.' + description: "Send this event to truncate a previous assistant message’s audio. The server will produce audio faster than realtime, so this event is useful when the user interrupts to truncate audio that has already been sent to the client but not yet played. This will synchronize the server's understanding of the audio with the client's playback.\nTruncating audio will delete the server-side text transcript to ensure there is not text in the context that hasn't been heard by the user.\nIf successful, the server will respond with a `conversation.item.truncated` event. " x-oaiMeta: name: conversation.item.truncate group: realtime @@ -9597,8 +9625,8 @@ components: description: 'The event type, must be "input_audio_buffer.append".' audio: type: string - description: Base64-encoded audio bytes. - description: Send this event to append audio bytes to the input audio buffer. 
+ description: Base64-encoded audio bytes. This must be in the format specified by the `input_audio_format` field in the session configuration. + description: "Send this event to append audio bytes to the input audio buffer. The audio buffer is temporary storage you can write to and later commit. In Server VAD mode, the audio buffer is used to detect speech and the server will decide when to commit. When Server VAD is disabled, you must commit the audio buffer manually.\nThe client may choose how much audio to place in each event up to a maximum of 15 MiB, for example streaming smaller chunks from the client may allow the VAD to be more responsive. Unlike most other client events, the server will not send a confirmation response to this event." x-oaiMeta: name: input_audio_buffer.append group: realtime @@ -9614,7 +9642,7 @@ components: type: type: string description: 'The event type, must be "input_audio_buffer.clear".' - description: Send this event to clear the audio bytes in the buffer. + description: Send this event to clear the audio bytes in the buffer. The server will respond with an `input_audio_buffer.cleared` event. x-oaiMeta: name: input_audio_buffer.clear group: realtime @@ -9630,7 +9658,7 @@ components: type: type: string description: 'The event type, must be "input_audio_buffer.commit".' - description: Send this event to commit audio bytes to a user message. + description: "Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically.\nCommitting the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event."
x-oaiMeta: name: input_audio_buffer.commit group: realtime @@ -9645,8 +9673,8 @@ components: description: Optional client-generated ID used to identify this event. type: type: string - description: 'The event type, must be "response.cancel".' - description: Send this event to cancel an in-progress response. + description: 'The event type, must be `response.cancel`.' + description: Send this event to cancel an in-progress response. The server will respond with a `response.cancelled` event or an error if there is no response to cancel. x-oaiMeta: name: response.cancel group: realtime @@ -9662,57 +9690,10 @@ components: description: Optional client-generated ID used to identify this event. type: type: string - description: 'The event type, must be "response.create".' + description: 'The event type, must be `response.create`.' response: - type: object - properties: - modalities: - type: array - items: - type: string - description: The modalities for the response. - instructions: - type: string - description: Instructions for the model. - voice: - type: string - description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`.' - output_audio_format: - type: string - description: The format of output audio. - tools: - type: array - items: - type: object - properties: - type: - type: string - description: The type of the tool. - name: - type: string - description: The name of the function. - description: - type: string - description: The description of the function. - parameters: - type: object - description: Parameters of the function in JSON Schema. - description: Tools (functions) available to the model. - tool_choice: - type: string - description: How the model chooses tools. - temperature: - type: number - description: Sampling temperature. - max_output_tokens: - oneOf: - - type: integer - - enum: - - inf - type: string - description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. 
Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf".' - description: Configuration for the response. - description: Send this event to trigger a response generation. + $ref: '#/components/schemas/RealtimeResponse' + description: "This event instructs the server to create a Response, which means triggering model inference. When in Server VAD mode, the server will create Responses automatically.\nA Response will include at least one Item, and may have two, in which case the second will be a function call. These Items will be appended to the conversation history.\nThe server will respond with a `response.created` event, events for Items and content created, and finally a `response.done` event to indicate the Response is complete.\nThe `response.create` event includes inference configuration like `instructions`, and `temperature`. These fields will override the Session's configuration for this Response only." x-oaiMeta: name: response.create group: realtime @@ -9730,184 +9711,188 @@ components: type: string description: 'The event type, must be "session.update".' session: - type: object - properties: - modalities: - type: array - items: - type: string - description: 'The set of modalities the model can respond with. To disable audio, set this to ["text"].' - instructions: - type: string - description: The default system instructions prepended to model calls. - voice: - type: string - description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. Cannot be changed once the model has responded with audio at least once.' - input_audio_format: - type: string - description: 'The format of input audio. Options are "pcm16", "g711_ulaw", or "g711_alaw".' - output_audio_format: - type: string - description: 'The format of output audio. Options are "pcm16", "g711_ulaw", or "g711_alaw".' 
- input_audio_transcription: - type: object - properties: - model: - type: string - description: 'The model to use for transcription (e.g., "whisper-1").' - description: Configuration for input audio transcription. Can be set to `null` to turn off. - turn_detection: - type: object - properties: - type: - type: string - description: 'Type of turn detection, only "server_vad" is currently supported.' - threshold: - type: number - description: Activation threshold for VAD (0.0 to 1.0). - prefix_padding_ms: - type: integer - description: Amount of audio to include before speech starts (in milliseconds). - silence_duration_ms: - type: integer - description: Duration of silence to detect speech stop (in milliseconds). - description: Configuration for turn detection. Can be set to `null` to turn off. - tools: - type: array - items: - type: object - properties: - type: - type: string - description: 'The type of the tool, e.g., "function".' - name: - type: string - description: The name of the function. - description: - type: string - description: The description of the function. - parameters: - type: object - description: Parameters of the function in JSON Schema. - description: Tools (functions) available to the model. - tool_choice: - type: string - description: 'How the model chooses tools. Options are "auto", "none", "required", or specify a function.' - temperature: - type: number - description: Sampling temperature for the model. - max_output_tokens: - oneOf: - - type: integer - - enum: - - inf - type: string - description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf".' - description: Session configuration to update. - description: Send this event to update the session’s default configuration. 
+ $ref: '#/components/schemas/RealtimeSession' + description: 'Send this event to update the session’s default configuration. The client may send this event at any time to update the session configuration, and any field may be updated at any time, except for "voice". The server will respond with a `session.updated` event that shows the full effective configuration. Only fields that are present are updated, thus the correct way to clear a field like "instructions" is to pass an empty string.' x-oaiMeta: name: session.update group: realtime - example: "{\n \"event_id\": \"event_123\",\n \"type\": \"session.update\",\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Your knowledge cutoff is 2023-10. You are a helpful assistant.\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather for a location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": { \"type\": \"string\" }\n },\n \"required\": [\"location\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_output_tokens\": null\n }\n}\n" - RealtimeServerEventConversationCreated: - required: - - event_id - - type - - conversation + example: "{\n \"event_id\": \"event_123\",\n \"type\": \"session.update\",\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Your knowledge cutoff is 2023-10. 
You are a helpful assistant.\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 500\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather for a location, tell the user you are fetching the weather.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": { \"type\": \"string\" }\n },\n \"required\": [\"location\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" + RealtimeConversationItem: type: object properties: - event_id: - type: string - description: The unique ID of the server event. - type: + id: type: string - description: 'The event type, must be "conversation.created".' - conversation: - type: object - properties: - id: - type: string - description: The unique ID of the conversation. - object: - type: string - description: 'The object type, must be "realtime.conversation".' - description: The conversation resource. - description: Returned when a conversation is created. Emitted right after session creation. - x-oaiMeta: - name: conversation.created - group: realtime - example: "{\n \"event_id\": \"event_9101\",\n \"type\": \"conversation.created\",\n \"conversation\": {\n \"id\": \"conv_001\",\n \"object\": \"realtime.conversation\"\n }\n}\n" - RealtimeServerEventConversationItemCreated: - required: - - event_id - - type - - previous_item_id - - item - type: object - properties: - event_id: + description: The unique ID of the item. + example: msg_003 + object: + enum: + - realtime.item type: string - description: The unique ID of the server event. + description: 'The object type, must be "realtime.item".' 
+ example: realtime.item type: + enum: + - message + - function_call + - function_call_output type: string - description: 'The event type, must be "conversation.item.created".' - previous_item_id: + description: The type of the item. + example: message + status: + enum: + - completed + - in_progress + - incomplete type: string - description: The ID of the preceding item. - item: + description: The status of the item. + example: completed + role: + enum: + - user + - assistant + - system + type: string + description: The role of the message sender. + example: user + content: + type: array + items: + type: object + properties: + type: + enum: + - input_text + - input_audio + - text + - audio + type: string + description: The content type. + example: input_text + text: + type: string + description: The text content (for text or input_text items). + example: 'Hello, how are you?' + audio: + type: string + description: Base64-encoded audio bytes (for audio or input_audio items). + transcript: + type: string + description: The transcript of the audio (for audio items). + call_id: + type: string + description: The ID of the function call (for function_call items). + name: + type: string + description: The name of the function being called (for function_call items). + arguments: + type: string + description: The arguments of the function call (for function_call items). + output: + type: string + description: The output of the function call (for function_call_output items). + description: The content of the message. 
+ description: "A realtime Item is of three types: message, function_call, or function_call_output.\n\nA message item can contain text or audio.\nA function_call item indicates a model's desire to call a function, which is the only tool supported for now\nA function_call_output item indicates a function response.\nThe client may add and remove message and function_call_output Items using conversation.item.create and conversation.item.delete.\n" + RealtimeResponse: + type: object + properties: + id: + type: string + description: The unique ID of the response. + example: resp_001 + object: + enum: + - realtime.response + type: string + description: 'The object type, must be "realtime.response".' + example: realtime.response + status: + enum: + - completed + - in_progress + - cancelled + - failed + - incomplete + type: string + description: The status of the response. + example: in_progress + status_details: + type: object + description: Additional details about the status. + example: + output: + type: array + items: + $ref: '#/components/schemas/RealtimeConversationItem' + description: The list of output items generated by the response. + usage: + type: object + properties: + total_tokens: + type: integer + description: The total number of tokens used. + example: 50 + input_tokens: + type: integer + description: The number of input tokens used. + example: 20 + output_tokens: + type: integer + description: The number of output tokens used. + example: 30 + description: Usage statistics for the response. + description: The response resource. + RealtimeServerEventConversationCreated: + required: + - event_id + - type + - conversation + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + description: 'The event type, must be "conversation.created".' + conversation: type: object properties: id: type: string - description: The unique ID of the item. + description: The unique ID of the conversation. 
object: type: string - description: 'The object type, must be "realtime.item".' - type: - type: string - description: 'The type of the item ("message", "function_call", "function_call_output").' - status: - type: string - description: 'The status of the item ("completed", "in_progress", "incomplete").' - role: - type: string - description: 'The role associated with the item ("user", "assistant", "system").' - content: - type: array - items: - type: object - properties: - type: - type: string - description: 'The content type ("text", "audio", "input_text", "input_audio").' - text: - type: string - description: The text content. - audio: - type: string - description: Base64-encoded audio data. - transcript: - type: string - description: The transcript of the audio. - description: The content of the item. - call_id: - type: string - description: The ID of the function call (for "function_call" items). - name: - type: string - description: The name of the function being called. - arguments: - type: string - description: The arguments of the function call. - output: - type: string - description: The output of the function call (for "function_call_output" items). - description: The item that was created. - description: Returned when a conversation item is created. + description: 'The object type, must be "realtime.conversation".' + description: The conversation resource. + description: Returned when a conversation is created. Emitted right after session creation. + x-oaiMeta: + name: conversation.created + group: realtime + example: "{\n \"event_id\": \"event_9101\",\n \"type\": \"conversation.created\",\n \"conversation\": {\n \"id\": \"conv_001\",\n \"object\": \"realtime.conversation\"\n }\n}\n" + RealtimeServerEventConversationItemCreated: + required: + - event_id + - type + - previous_item_id + - item + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. 
+ type: + type: string + description: 'The event type, must be `conversation.item.created`.' + previous_item_id: + type: string + description: 'The ID of the preceding item in the Conversation context, allows the client to understand the order of the conversation.' + item: + $ref: '#/components/schemas/RealtimeConversationItem' + description: "Returned when a conversation item is created. There are several scenarios that produce this event:\n - The server is generating a Response, which if successful will produce either one or two Items, which will be of type `message` (role `assistant`) or type `function_call`.\n - The input audio buffer has been committed, either by the client or the server (in `server_vad` mode). The server will take the content of the input audio buffer and add it to a new user message Item.\n - The client has sent a `conversation.item.create` event to add a new Item to the Conversation." x-oaiMeta: name: conversation.item.created group: realtime - example: "{\n \"event_id\": \"event_1920\",\n \"type\": \"conversation.item.created\",\n \"previous_item_id\": \"msg_002\",\n \"item\": {\n \"id\": \"msg_003\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_audio\",\n \"transcript\": null\n }\n ]\n }\n}\n" + example: "{\n \"event_id\": \"event_1920\",\n \"type\": \"conversation.item.created\",\n \"previous_item_id\": \"msg_002\",\n \"item\": {\n \"id\": \"msg_003\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_audio\",\n \"transcript\": \"hello how are you\",\n \"audio\": \"base64encodedaudio==\"\n }\n ]\n }\n}\n" RealtimeServerEventConversationItemDeleted: required: - event_id @@ -9920,11 +9905,11 @@ components: description: The unique ID of the server event. 
type: type: string - description: 'The event type, must be "conversation.item.deleted".' + description: 'The event type, must be `conversation.item.deleted`.' item_id: type: string description: The ID of the item that was deleted. - description: Returned when an item in the conversation is deleted. + description: Returned when an item in the conversation is deleted by the client with a `conversation.item.delete` event. This event is used to synchronize the server's understanding of the conversation history with the client's view. x-oaiMeta: name: conversation.item.deleted group: realtime @@ -9943,17 +9928,17 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "conversation.item.input_audio_transcription.completed".' + description: 'The event type, must be `conversation.item.input_audio_transcription.completed`.' item_id: type: string - description: The ID of the user message item. + description: The ID of the user message item containing the audio. content_index: type: integer description: The index of the content part containing the audio. transcript: type: string description: The transcribed text. - description: Returned when input audio transcription is enabled and a transcription succeeds. + description: "This event is the output of audio transcription for user audio written to the user audio buffer. Transcription begins when the input audio buffer is committed by the client or server (in `server_vad` mode). Transcription runs asynchronously with Response creation, so this event may come before or after the Response events.\nRealtime API models accept audio natively, and thus input transcription is a separate process run on a separate ASR (Automatic Speech Recognition) model, currently always `whisper-1`. Thus the transcript may diverge somewhat from the model's interpretation, and should be treated as a rough guide." 
x-oaiMeta: name: conversation.item.input_audio_transcription.completed group: realtime @@ -9972,7 +9957,7 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "conversation.item.input_audio_transcription.failed".' + description: 'The event type, must be `conversation.item.input_audio_transcription.failed`.' item_id: type: string description: The ID of the user message item. @@ -9995,7 +9980,7 @@ components: type: string description: 'Parameter related to the error, if any.' description: Details of the transcription error. - description: 'Returned when input audio transcription is configured, and a transcription request for a user message failed.' + description: 'Returned when input audio transcription is configured, and a transcription request for a user message failed. These events are separate from other `error` events so that the client can identify the related Item.' x-oaiMeta: name: conversation.item.input_audio_transcription.failed group: realtime @@ -10014,7 +9999,7 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "conversation.item.truncated".' + description: 'The event type, must be `conversation.item.truncated`.' item_id: type: string description: The ID of the assistant message item that was truncated. @@ -10024,7 +10009,7 @@ components: audio_end_ms: type: integer description: 'The duration up to which the audio was truncated, in milliseconds.' - description: Returned when an earlier assistant audio message item is truncated by the client. + description: "Returned when an earlier assistant audio message item is truncated by the client with a `conversation.item.truncate` event. 
This event is used to synchronize the server's understanding of the audio with the client's playback.\nThis action will truncate the audio and remove the server-side text transcript to ensure there is no text in the context that hasn't been heard by the user." x-oaiMeta: name: conversation.item.truncated group: realtime @@ -10061,7 +10046,7 @@ components: type: string description: 'The event_id of the client event that caused the error, if applicable.' description: Details of the error. - description: Returned when an error occurs. + description: 'Returned when an error occurs, which could be a client problem or a server problem. Most errors are recoverable and the session will stay open, we recommend that implementors monitor and log error messages by default.' x-oaiMeta: name: error group: realtime @@ -10077,8 +10062,8 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "input_audio_buffer.cleared".' - description: Returned when the input audio buffer is cleared by the client. + description: 'The event type, must be `input_audio_buffer.cleared`.' + description: Returned when the input audio buffer is cleared by the client with an `input_audio_buffer.clear` event. x-oaiMeta: name: input_audio_buffer.cleared group: realtime @@ -10096,14 +10081,14 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "input_audio_buffer.committed".' + description: 'The event type, must be `input_audio_buffer.committed`.' previous_item_id: type: string description: The ID of the preceding item after which the new item will be inserted. item_id: type: string description: The ID of the user message item that will be created. - description: 'Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode.'
+ description: 'Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode. The `item_id` property is the ID of the user message item that will be created, thus a `conversation.item.created` event will also be sent to the client.' x-oaiMeta: name: input_audio_buffer.committed group: realtime @@ -10121,14 +10106,14 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "input_audio_buffer.speech_started".' + description: 'The event type, must be `input_audio_buffer.speech_started`.' audio_start_ms: type: integer - description: Milliseconds since the session started when speech was detected. + description: 'Milliseconds from the start of all audio written to the buffer during the session when speech was first detected. This will correspond to the beginning of audio sent to the model, and thus includes the `prefix_padding_ms` configured in the Session.' item_id: type: string description: The ID of the user message item that will be created when speech stops. - description: Returned in server turn detection mode when speech is detected. + description: Sent by the server when in `server_vad` mode to indicate that speech has been detected in the audio buffer. This can happen any time audio is added to the buffer (unless speech is already detected). The client may want to use this event to interrupt audio playback or provide visual feedback to the user. The client should expect to receive a `input_audio_buffer.speech_stopped` event when speech stops. The `item_id` property is the ID of the user message item that will be created when speech stops and will also be included in the `input_audio_buffer.speech_stopped` event (unless the client manually commits the audio buffer during VAD activation). x-oaiMeta: name: input_audio_buffer.speech_started group: realtime @@ -10146,14 +10131,14 @@ components: description: The unique ID of the server event. 
type: type: string - description: 'The event type, must be "input_audio_buffer.speech_stopped".' + description: 'The event type, must be `input_audio_buffer.speech_stopped`.' audio_end_ms: type: integer - description: Milliseconds since the session started when speech stopped. + description: 'Milliseconds since the session started when speech stopped. This will correspond to the end of audio sent to the model, and thus includes the `silence_duration_ms` configured in the Session.' item_id: type: string description: The ID of the user message item that will be created. - description: Returned in server turn detection mode when speech stops. + description: Returned in `server_vad` mode when the server detects the end of speech in the audio buffer. The server will also send a `conversation.item.created` event with the user message item that is created from the audio buffer. x-oaiMeta: name: input_audio_buffer.speech_stopped group: realtime @@ -10170,7 +10155,7 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "rate_limits.updated".' + description: 'The event type, must be `rate_limits.updated`.' rate_limits: type: array items: @@ -10178,7 +10163,7 @@ properties: name: type: string - description: 'The name of the rate limit ("requests", "tokens", "input_tokens", "output_tokens").' + description: 'The name of the rate limit (`requests`, `tokens`).' limit: type: integer description: The maximum allowed value for the rate limit. @@ -10189,7 +10174,7 @@ type: number description: Seconds until the rate limit resets. description: List of rate limit information. - description: Emitted after every "response.done" event to indicate the updated rate limits. + description: 'Emitted at the beginning of a Response to indicate the updated rate limits.
When a Response is created some tokens will be "reserved" for the output tokens, the rate limits shown here reflect that reservation, which is then adjusted accordingly once the Response is completed.' x-oaiMeta: name: rate_limits.updated group: realtime @@ -10450,33 +10435,10 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "response.created".' + description: 'The event type, must be `response.created`.' response: - type: object - properties: - id: - type: string - description: The unique ID of the response. - object: - type: string - description: 'The object type, must be "realtime.response".' - status: - type: string - description: The status of the response ("in_progress"). - status_details: - type: object - description: Additional details about the status. - output: - type: array - items: - type: object - description: An item in the response output. - description: The list of output items generated by the response. - usage: - type: object - description: Usage statistics for the response. - description: The response resource. - description: 'Returned when a new Response is created. The first event of response creation, where the response is in an initial state of "in_progress".' + $ref: '#/components/schemas/RealtimeResponse' + description: 'Returned when a new Response is created. The first event of response creation, where the response is in an initial state of `in_progress`.' x-oaiMeta: name: response.created group: realtime @@ -10495,35 +10457,12 @@ components: type: string description: 'The event type, must be "response.done".' response: - type: object - properties: - id: - type: string - description: The unique ID of the response. - object: - type: string - description: 'The object type, must be "realtime.response".' - status: - type: string - description: 'The final status of the response ("completed", "cancelled", "failed", "incomplete").' 
- status_details: - type: object - description: Additional details about the status. - output: - type: array - items: - type: object - description: An item in the response output. - description: The list of output items generated by the response. - usage: - type: object - description: Usage statistics for the response. - description: The response resource. - description: 'Returned when a Response is done streaming. Always emitted, no matter the final state.' + $ref: '#/components/schemas/RealtimeResponse' + description: 'Returned when a Response is done streaming. Always emitted, no matter the final state. The Response object included in the `response.done` event will include all output Items in the Response but will omit the raw audio data.' x-oaiMeta: name: response.done group: realtime - example: "{\n \"event_id\": \"event_3132\",\n \"type\": \"response.done\",\n \"response\": {\n \"id\": \"resp_001\",\n \"object\": \"realtime.response\",\n \"status\": \"completed\",\n \"status_details\": null,\n \"output\": [\n {\n \"id\": \"msg_006\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Sure, how can I assist you today?\"\n }\n ]\n }\n ],\n \"usage\": {\n \"total_tokens\": 50,\n \"input_tokens\": 20,\n \"output_tokens\": 30\n }\n }\n}\n" + example: "{\n \"event_id\": \"event_3132\",\n \"type\": \"response.done\",\n \"response\": {\n \"id\": \"resp_001\",\n \"object\": \"realtime.response\",\n \"status\": \"completed\",\n \"status_details\": null,\n \"output\": [\n {\n \"id\": \"msg_006\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Sure, how can I assist you today?\"\n }\n ]\n }\n ],\n \"usage\": {\n \"total_tokens\":275,\n \"input_tokens\":127,\n \"output_tokens\":148,\n \"input_token_details\": {\n 
\"cached_tokens\":0,\n \"text_tokens\":119,\n \"audio_tokens\":8\n },\n \"output_token_details\": {\n \"text_tokens\":36,\n \"audio_tokens\":112\n }\n }\n }\n}\n" RealtimeServerEventResponseFunctionCallArgumentsDelta: required: - event_id @@ -10612,51 +10551,16 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "response.output_item.added".' + description: 'The event type, must be `response.output_item.added`.' response_id: type: string - description: The ID of the response to which the item belongs. + description: The ID of the Response to which the item belongs. output_index: type: integer - description: The index of the output item in the response. + description: The index of the output item in the Response. item: - type: object - properties: - id: - type: string - description: The unique ID of the item. - object: - type: string - description: 'The object type, must be "realtime.item".' - type: - type: string - description: 'The type of the item ("message", "function_call", "function_call_output").' - status: - type: string - description: 'The status of the item ("in_progress", "completed").' - role: - type: string - description: The role associated with the item ("assistant"). - content: - type: array - items: - type: object - properties: - type: - type: string - description: 'The content type ("text", "audio").' - text: - type: string - description: The text content. - audio: - type: string - description: Base64-encoded audio data. - transcript: - type: string - description: The transcript of the audio. - description: The content of the item. - description: The item that was added. - description: Returned when a new Item is created during response generation. + $ref: '#/components/schemas/RealtimeConversationItem' + description: Returned when a new Item is created during Response generation. 
x-oaiMeta: name: response.output_item.added group: realtime @@ -10675,50 +10579,15 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "response.output_item.done".' + description: 'The event type, must be `response.output_item.done`.' response_id: type: string - description: The ID of the response to which the item belongs. + description: The ID of the Response to which the item belongs. output_index: type: integer - description: The index of the output item in the response. + description: The index of the output item in the Response. item: - type: object - properties: - id: - type: string - description: The unique ID of the item. - object: - type: string - description: 'The object type, must be "realtime.item".' - type: - type: string - description: 'The type of the item ("message", "function_call", "function_call_output").' - status: - type: string - description: 'The final status of the item ("completed", "incomplete").' - role: - type: string - description: The role associated with the item ("assistant"). - content: - type: array - items: - type: object - properties: - type: - type: string - description: 'The content type ("text", "audio").' - text: - type: string - description: The text content. - audio: - type: string - description: Base64-encoded audio data. - transcript: - type: string - description: The transcript of the audio. - description: The content of the item. - description: The completed item. + $ref: '#/components/schemas/RealtimeConversationItem' description: 'Returned when an Item is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.' x-oaiMeta: name: response.output_item.done @@ -10810,99 +10679,14 @@ components: description: The unique ID of the server event. type: type: string - description: 'The event type, must be "session.created".' + description: 'The event type, must be `session.created`.' 
session: - type: object - properties: - id: - type: string - description: The unique ID of the session. - object: - type: string - description: 'The object type, must be "realtime.session".' - model: - type: string - description: The default model used for this session. - modalities: - type: array - items: - type: string - description: The set of modalities the model can respond with. - instructions: - type: string - description: The default system instructions. - voice: - type: string - description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`.' - input_audio_format: - type: string - description: The format of input audio. - output_audio_format: - type: string - description: The format of output audio. - input_audio_transcription: - type: object - properties: - enabled: - type: boolean - description: Whether input audio transcription is enabled. - model: - type: string - description: The model used for transcription. - description: Configuration for input audio transcription. - turn_detection: - type: object - properties: - type: - type: string - description: The type of turn detection ("server_vad" or "none"). - threshold: - type: number - description: Activation threshold for VAD. - prefix_padding_ms: - type: integer - description: Audio included before speech starts (in milliseconds). - silence_duration_ms: - type: integer - description: Duration of silence to detect speech stop (in milliseconds). - description: Configuration for turn detection. - tools: - type: array - items: - type: object - properties: - type: - type: string - description: The type of the tool. - name: - type: string - description: The name of the function. - description: - type: string - description: The description of the function. - parameters: - type: object - description: Parameters of the function in JSON Schema. - description: Tools (functions) available to the model. - tool_choice: - type: string - description: How the model chooses tools. 
- temperature: - type: number - description: Sampling temperature. - max_output_tokens: - oneOf: - - type: integer - - enum: - - inf - type: string - description: Maximum number of output tokens. - description: The session resource. - description: Returned when a session is created. Emitted automatically when a new connection is established. + $ref: '#/components/schemas/RealtimeSession' + description: Returned when a Session is created. Emitted automatically when a new connection is established as the first server event. This event will contain the default Session configuration. x-oaiMeta: name: session.created group: realtime - example: "{\n \"event_id\": \"event_1234\",\n \"type\": \"session.created\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_output_tokens\": null\n }\n}\n" + example: "{\n \"event_id\": \"event_1234\",\n \"type\": \"session.created\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": null\n }\n}\n" 
RealtimeServerEventSessionUpdated: required: - event_id @@ -10917,97 +10701,130 @@ components: type: string description: 'The event type, must be "session.updated".' session: + $ref: '#/components/schemas/RealtimeSession' + description: 'Returned when a session is updated with a `session.update` event, unless there is an error.' + x-oaiMeta: + name: session.updated + group: realtime + example: "{\n \"event_id\": \"event_5678\",\n \"type\": \"session.updated\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\"],\n \"instructions\": \"New instructions\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_response_output_tokens\": 200\n }\n}\n" + RealtimeSession: + type: object + properties: + id: + type: string + description: The unique ID of the session. + example: sess_001 + object: + enum: + - realtime.session + type: string + description: 'The object type, must be "realtime.session".' + example: realtime.session + model: + type: string + description: The default model used for this session. + example: gpt-4o-realtime-preview-2024-10-01 + modalities: + type: array + items: + type: string + description: 'The set of modalities the model can respond with. To disable audio, set this to ["text"].' + example: + - text + - audio + instructions: + type: string + description: The default system instructions prepended to model calls. + example: Your knowledge cutoff is 2023-10. You are a helpful assistant. + voice: + enum: + - alloy + - echo + - shimmer + type: string + description: The voice the model uses to respond. Cannot be changed once the model has responded with audio at least once. 
+ example: alloy + input_audio_format: + $ref: '#/components/schemas/RealtimeAudioFormat' + output_audio_format: + $ref: '#/components/schemas/RealtimeAudioFormat' + input_audio_transcription: type: object properties: - id: - type: string - description: The unique ID of the session. - object: - type: string - description: 'The object type, must be "realtime.session".' + enabled: + type: boolean + description: Whether transcription is enabled. + example: true model: type: string - description: The default model used for this session. - modalities: - type: array - items: - type: string - description: The set of modalities the model can respond with. - instructions: - type: string - description: The default system instructions. - voice: - type: string - description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`.' - input_audio_format: - type: string - description: The format of input audio. - output_audio_format: - type: string - description: The format of output audio. - input_audio_transcription: - type: object - properties: - enabled: - type: boolean - description: Whether input audio transcription is enabled. - model: - type: string - description: The model used for transcription. - description: Configuration for input audio transcription. - turn_detection: - type: object - properties: - type: - type: string - description: The type of turn detection ("server_vad" or "none"). - threshold: - type: number - description: Activation threshold for VAD. - prefix_padding_ms: - type: integer - description: Audio included before speech starts (in milliseconds). - silence_duration_ms: - type: integer - description: Duration of silence to detect speech stop (in milliseconds). - description: Configuration for turn detection. - tools: - type: array - items: - type: object - properties: - type: - type: string - description: The type of the tool. - name: - type: string - description: The name of the function. 
- description: - type: string - description: The description of the function. - parameters: - type: object - description: Parameters of the function in JSON Schema. - description: Tools (functions) available to the model. - tool_choice: + description: The model to use for transcription. + example: whisper-1 + description: Configuration for input audio transcription. Can be set to null to turn off. + turn_detection: + type: object + properties: + type: + enum: + - server_vad + - none type: string - description: How the model chooses tools. - temperature: + description: The type of turn detection. + example: server_vad + threshold: type: number - description: Sampling temperature. - max_output_tokens: - oneOf: - - type: integer - - enum: - - inf - type: string - description: Maximum number of output tokens. - description: The updated session resource. - description: Returned when a session is updated. - x-oaiMeta: - name: session.updated - group: realtime - example: "{\n \"event_id\": \"event_5678\",\n \"type\": \"session.updated\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\"],\n \"instructions\": \"New instructions\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"enabled\": true,\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"none\"\n },\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_output_tokens\": 200\n }\n}\n" + description: Activation threshold for VAD (0.0 to 1.0). + example: 0.5 + prefix_padding_ms: + type: integer + description: Amount of audio to include before speech starts (in milliseconds). + example: 300 + silence_duration_ms: + type: integer + description: Duration of silence to detect speech stop (in milliseconds). + example: 200 + description: Configuration for turn detection. 
Can be set to null to turn off. + tools: + type: array + items: + type: object + properties: + type: + type: string + description: 'The type of the tool, e.g., "function".' + example: function + name: + type: string + description: The name of the function. + example: get_weather + description: + type: string + description: The description of the function. + example: Get the current weather for a location. + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + enum: + - auto + - none + - required + type: string + description: How the model chooses tools. + example: auto + temperature: + type: number + description: Sampling temperature for the model. + example: 0.8 + max_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf".' + default: inf + description: "A session refers to a single WebSocket connection between a client and the server.\n\nOnce a client creates a session, it then sends JSON-formatted events containing text and audio chunks.\nThe server will respond in kind with audio containing voice output, a text transcript of that voice output,\nand function calls (if functions are provided by the client).\n\nA realtime Session represents the overall client-server interaction, and contains default configuration.\n\nIt has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create).\n" ResponseFormatJsonObject: required: - type @@ -12774,124 +12591,6 @@ components: - g711_alaw type: string description: The format of input/output audio. 
- RealtimeSession: - type: object - properties: - id: - type: string - description: The unique ID of the session. - example: sess_001 - object: - enum: - - realtime.session - type: string - description: 'The object type, must be "realtime.session".' - example: realtime.session - model: - type: string - description: The default model used for this session. - example: gpt-4o-realtime-preview-2024-10-01 - modalities: - type: array - items: - type: string - description: 'The set of modalities the model can respond with. To disable audio, set this to ["text"].' - example: - - text - - audio - instructions: - type: string - description: The default system instructions prepended to model calls. - example: Your knowledge cutoff is 2023-10. You are a helpful assistant. - voice: - enum: - - alloy - - echo - - shimmer - type: string - description: The voice the model uses to respond. Cannot be changed once the model has responded with audio at least once. - example: alloy - input_audio_format: - $ref: '#/components/schemas/RealtimeAudioFormat' - output_audio_format: - $ref: '#/components/schemas/RealtimeAudioFormat' - input_audio_transcription: - type: object - properties: - enabled: - type: boolean - description: Whether transcription is enabled. - example: true - model: - type: string - description: The model to use for transcription. - example: whisper-1 - description: Configuration for input audio transcription. Can be set to null to turn off. - turn_detection: - type: object - properties: - type: - enum: - - server_vad - - none - type: string - description: The type of turn detection. - example: server_vad - threshold: - type: number - description: Activation threshold for VAD (0.0 to 1.0). - example: 0.5 - prefix_padding_ms: - type: integer - description: Amount of audio to include before speech starts (in milliseconds). - example: 300 - silence_duration_ms: - type: integer - description: Duration of silence to detect speech stop (in milliseconds). 
- example: 200 - description: Configuration for turn detection. Can be set to null to turn off. - tools: - type: array - items: - type: object - properties: - type: - type: string - description: 'The type of the tool, e.g., "function".' - example: function - name: - type: string - description: The name of the function. - example: get_weather - description: - type: string - description: The description of the function. - example: Get the current weather for a location. - parameters: - type: object - description: Parameters of the function in JSON Schema. - description: Tools (functions) available to the model. - tool_choice: - enum: - - auto - - none - - required - type: string - description: How the model chooses tools. - example: auto - temperature: - type: number - description: Sampling temperature for the model. - example: 0.8 - max_output_tokens: - oneOf: - - type: integer - - enum: - - inf - type: string - description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf".' 
- default: inf - description: "A session refers to a single WebSocket connection between a client and the server.\n\nOnce a client creates a session, it then sends JSON-formatted events containing text and audio chunks.\nThe server will respond in kind with audio containing voice output, a text transcript of that voice output,\nand function calls (if functions are provided by the client).\n\nA realtime Session represents the overall client-server interaction, and contains default configuration.\n\nIt has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create).\n" RealtimeConversation: type: object properties: @@ -12909,81 +12608,6 @@ components: example: id: conv_001 object: realtime.conversation - RealtimeConversationItem: - type: object - properties: - id: - type: string - description: The unique ID of the item. - example: msg_003 - object: - enum: - - realtime.item - type: string - description: 'The object type, must be "realtime.item".' - example: realtime.item - type: - enum: - - message - - function_call - - function_call_output - type: string - description: The type of the item. - example: message - status: - enum: - - completed - - in_progress - - incomplete - type: string - description: The status of the item. - example: completed - role: - enum: - - user - - assistant - - system - type: string - description: The role of the message sender. - example: user - content: - type: array - items: - type: object - properties: - type: - enum: - - input_text - - input_audio - - text - - audio - type: string - description: The content type. - example: input_text - text: - type: string - description: The text content (for text or input_text items). - example: 'Hello, how are you?' - audio: - type: string - description: Base64-encoded audio bytes (for audio or input_audio items). - transcript: - type: string - description: The transcript of the audio (for audio items). 
- call_id: - type: string - description: The ID of the function call (for function_call items). - name: - type: string - description: The name of the function being called (for function_call items). - arguments: - type: string - description: The arguments of the function call (for function_call items). - output: - type: string - description: The output of the function call (for function_call_output items). - description: The content of the message. - description: "A realtime Item is of three types: message, function_call, or function_call_output.\n\nA message item can contain text or audio.\nA function_call item indicates a model's desire to call a function, which is the only tool supported for now\nA function_call_output item indicates a function response.\nThe client may add and remove message and function_call_output Items using conversation.item.create and conversation.item.delete.\n" RealtimeContentPart: type: object properties: @@ -13035,55 +12659,6 @@ components: message: The 'type' field is missing. param: event_id: event_567 - RealtimeResponse: - type: object - properties: - id: - type: string - description: The unique ID of the response. - example: resp_001 - object: - enum: - - realtime.response - type: string - description: 'The object type, must be "realtime.response".' - example: realtime.response - status: - enum: - - completed - - in_progress - - cancelled - - failed - - incomplete - type: string - description: The status of the response. - example: in_progress - status_details: - type: object - description: Additional details about the status. - example: - output: - type: array - items: - $ref: '#/components/schemas/RealtimeConversationItem' - description: The list of output items generated by the response. - usage: - type: object - properties: - total_tokens: - type: integer - description: The total number of tokens used. - example: 50 - input_tokens: - type: integer - description: The number of input tokens used. 
- example: 20 - output_tokens: - type: integer - description: The number of output tokens used. - example: 30 - description: Usage statistics for the response. - description: The response resource. RealtimeSessionUpdate: type: object properties: @@ -14776,7 +14351,7 @@ x-oaiMeta: path: events - id: administration title: Administration - description: "Programmatically manage your organization. \nThe Audit Logs endpoint provides a log of all actions taken in the organization for security and monitoring purposes.\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)\n" + description: "Programmatically manage your organization. \nThe Audit Logs endpoint provides a log of all actions taken in the organization for security and monitoring purposes.\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices#setting-up-your-organization)\n" navigationGroup: administration - id: invite title: Invites @@ -15042,7 +14617,7 @@ x-oaiMeta: title: Completions legacy: true navigationGroup: legacy - description: "Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. 
Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models.\n" + description: "Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation#text-generation-models) to leverage our best and newest models.\n" sections: - type: endpoint key: createCompletion