diff --git a/OpenAI_API/Audio/AudioRequest.cs b/OpenAI_API/Audio/AudioRequest.cs
index 6912a13..b13c554 100644
--- a/OpenAI_API/Audio/AudioRequest.cs
+++ b/OpenAI_API/Audio/AudioRequest.cs
@@ -6,54 +6,57 @@
namespace OpenAI_API.Audio
{
- ///
- /// Parameters for requests made by the .
- ///
- public class AudioRequest
- {
- ///
- /// The model to use for this request. Currently only is supported.
- ///
- [JsonProperty("model")]
- public string Model { get; set; } = OpenAI_API.Models.Model.DefaultTranscriptionModel;
-
- ///
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language for transcriptions, or English for translations.
- ///
- [JsonProperty("prompt", DefaultValueHandling = DefaultValueHandling.Ignore)]
- public string Prompt { get; set; } = null;
-
- ///
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- ///
- [JsonProperty("language", DefaultValueHandling = DefaultValueHandling.Ignore)]
- public string Language { get; set; } = null;
-
- ///
- /// The format of the transcript output, should be one of the options in . See
- ///
- [JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
- public string ResponseFormat { get; set; } = null;
-
- ///
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- ///
- [JsonProperty("temperature", DefaultValueHandling = DefaultValueHandling.Ignore)]
- public double Temperature { get; set; } = 0;
-
-
- ///
- /// The format of the transcript output. See
- ///
- public static class ResponseFormats
- {
+ ///
+ /// Parameters for requests made by the .
+ ///
+ public class AudioRequest
+ {
+ ///
+ /// The model to use for this request. Currently only is supported.
+ ///
+ [JsonProperty("model")]
+ public string Model { get; set; } = OpenAI_API.Models.Model.DefaultTranscriptionModel;
+
+ ///
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language for transcriptions, or English for translations.
+ ///
+ [JsonProperty("prompt", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public string Prompt { get; set; } = null;
+
+ ///
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ ///
+ [JsonProperty("language", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public string Language { get; set; } = null;
+
+ ///
+ /// The format of the transcript output, should be one of the options in . See
+ ///
+ [JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public string ResponseFormat { get; set; } = null;
+
+ ///
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ ///
+ [JsonProperty("temperature", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public double Temperature { get; set; } = 0;
+
+ [JsonProperty("timestamp_granularities", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public List TimeStamp_granularities { get; set; } = new List { "word", "segment" };
+
+
+ ///
+ /// The format of the transcript output. See
+ ///
+ public static class ResponseFormats
+ {
#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
- public const string JSON = "json";
- public const string Text = "text";
- public const string SRT = "srt";
- public const string VerboseJson = "verbose_json";
- public const string VTT = "vtt";
+ public const string JSON = "json";
+ public const string Text = "text";
+ public const string SRT = "srt";
+ public const string VerboseJson = "verbose_json";
+ public const string VTT = "vtt";
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
- }
- }
+ }
+ }
}
diff --git a/OpenAI_API/Audio/AudioResult.cs b/OpenAI_API/Audio/AudioResult.cs
index 5c77983..92263c8 100644
--- a/OpenAI_API/Audio/AudioResult.cs
+++ b/OpenAI_API/Audio/AudioResult.cs
@@ -12,9 +12,16 @@ public class AudioResultVerbose : ApiResultBase
public double duration { get; set; }
public string language { get; set; }
public List<Segment> segments { get; set; }
+ public List<Word> words { get; set; }
+
public string task { get; set; }
public string text { get; set; }
+ public class Word
+ {
+ public double end { get; set; }
+ public double start { get; set; }
+ public string word { get; set; }
+ }
public class Segment
{
public double avg_logprob { get; set; }
diff --git a/OpenAI_API/Audio/ITranscriptionEndpoint.cs b/OpenAI_API/Audio/ITranscriptionEndpoint.cs
index f08c93e..f032383 100644
--- a/OpenAI_API/Audio/ITranscriptionEndpoint.cs
+++ b/OpenAI_API/Audio/ITranscriptionEndpoint.cs
@@ -1,4 +1,5 @@
-using System.IO;
+using System.Collections.Generic;
+using System.IO;
using System.Threading.Tasks;
namespace OpenAI_API.Audio
@@ -77,5 +78,6 @@ public interface ITranscriptionEndpoint
/// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
/// A string of the transcribed text
Task<string> GetTextAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null);
- }
+
+ /// <summary>
+ /// Gets the transcription of the audio file as verbose JSON, including word- and segment-level timestamps.
+ /// </summary>
+ Task<string> GetWithDetailsAsync2(string audioFilePath, string language = null, string prompt = null, string responseFormat = null, List<string> timestamps = null, double? temperature = null);
+ }
}
\ No newline at end of file
diff --git a/OpenAI_API/Audio/TranscriptionEndpoint.cs b/OpenAI_API/Audio/TranscriptionEndpoint.cs
index fe1a268..56d63a8 100644
--- a/OpenAI_API/Audio/TranscriptionEndpoint.cs
+++ b/OpenAI_API/Audio/TranscriptionEndpoint.cs
@@ -2,190 +2,228 @@
using System.Collections.Generic;
using System.IO;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
namespace OpenAI_API.Audio
{
- ///
- /// Transcribe audio into text, with optional translation into English.
- ///
- public class TranscriptionEndpoint : EndpointBase, ITranscriptionEndpoint
- {
- ///
- protected override string Endpoint
- {
- get
- {
- if (TranslateToEnglish)
- {
- return "audio/translations";
- }
- else
- {
- return "audio/transcriptions";
- }
- }
- }
+ ///
+ /// Transcribe audio into text, with optional translation into English.
+ ///
+ public class TranscriptionEndpoint : EndpointBase, ITranscriptionEndpoint
+ {
+ ///
+ protected override string Endpoint
+ {
+ get
+ {
+ if (TranslateToEnglish)
+ {
+ return "audio/translations";
+ }
+ else
+ {
+ return "audio/transcriptions";
+ }
+ }
+ }
- ///
- /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
- ///
- /// Pass in the instance of the api
- /// If , the response will translate non-English audio into English. Otherwise the returned text will be in the spoken language.
- internal TranscriptionEndpoint(OpenAIAPI api, bool translate) : base(api)
- {
- TranslateToEnglish = translate;
- }
+ ///
+ /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
+ ///
+ /// Pass in the instance of the api
+ /// If , the response will translate non-English audio into English. Otherwise the returned text will be in the spoken language.
+ internal TranscriptionEndpoint(OpenAIAPI api, bool translate) : base(api)
+ {
+ TranslateToEnglish = translate;
+ }
- ///
- /// This allows you to set default parameters for every request, for example to set a default language. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
- ///
- public AudioRequest DefaultRequestArgs { get; set; } = new AudioRequest();
+ ///
+ /// This allows you to set default parameters for every request, for example to set a default language. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
+ ///
+ public AudioRequest DefaultRequestArgs { get; set; } = new AudioRequest();
- ///
- /// If , the response will translate non-English audio into English. Otherwise the returned text will be in the spoken language.
- ///
- private bool TranslateToEnglish { get; }
+ ///
+ /// If , the response will translate non-English audio into English. Otherwise the returned text will be in the spoken language.
+ ///
+ private bool TranslateToEnglish { get; }
- ///
- /// Gets the transcription of the audio stream as a text string
- ///
- /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- /// A string of the transcribed text
- public async Task<string> GetTextAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null)
- => await GetAsFormatAsync(audioStream, filename, AudioRequest.ResponseFormats.Text, language, prompt, temperature);
+ ///
+ /// Gets the transcription of the audio stream as a text string
+ ///
+ /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// A string of the transcribed text
+ public async Task<string> GetTextAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null)
+ => await GetAsFormatAsync(audioStream, filename, AudioRequest.ResponseFormats.Text, language, prompt, temperature);
- ///
- /// Gets the transcription of the audio file as a text string
- ///
- /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- /// A string of the transcribed text
- public async Task<string> GetTextAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null)
- {
- using (var fileStream = File.OpenRead(audioFilePath))
- {
- return await GetTextAsync(fileStream, Path.GetFileName(audioFilePath), language, prompt, temperature);
- }
- }
+ ///
+ /// Gets the transcription of the audio file as a text string
+ ///
+ /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// A string of the transcribed text
+ public async Task<string> GetTextAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null)
+ {
+ using (var fileStream = File.OpenRead(audioFilePath))
+ {
+ return await GetTextAsync(fileStream, Path.GetFileName(audioFilePath), language, prompt, temperature);
+ }
+ }
- ///
- /// Gets the transcription of the audio stream, with full metadata
- ///
- /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- /// A string of the transcribed text
- public async Task<AudioResultVerbose> GetWithDetailsAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null)
- {
- var request = new AudioRequest()
- {
- Language = language ?? DefaultRequestArgs.Language,
- Model = DefaultRequestArgs.Model,
- Prompt = prompt ?? DefaultRequestArgs.Prompt,
- Temperature = temperature ?? DefaultRequestArgs.Temperature
- };
- request.ResponseFormat = AudioRequest.ResponseFormats.VerboseJson;
- MultipartFormDataContent content;
- using (var memoryStream = new MemoryStream())
- {
- audioStream.CopyTo(memoryStream);
- content = new MultipartFormDataContent
- {
- { new StringContent(request.Model), "model" },
- { new StringContent(request.ResponseFormat), "response_format" },
- { new ByteArrayContent(memoryStream.ToArray()), "file", filename }
- };
- if (!string.IsNullOrEmpty(request.Language))
- content.Add(new StringContent(request.Language), "language");
- if (!string.IsNullOrEmpty(request.Prompt))
- content.Add(new StringContent(request.Prompt), "prompt");
- if (request.Temperature != 0)
- content.Add(new StringContent(request.Temperature.ToString()), "temperature");
- }
- return await HttpPost(Url, content);
- }
+ ///
+ /// Gets the transcription of the audio stream, with full metadata
+ ///
+ /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// A string of the transcribed text
+ public async Task<AudioResultVerbose> GetWithDetailsAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null)
+ {
+ var request = new AudioRequest()
+ {
+ Language = language ?? DefaultRequestArgs.Language,
+ Model = DefaultRequestArgs.Model,
+ Prompt = prompt ?? DefaultRequestArgs.Prompt,
+ Temperature = temperature ?? DefaultRequestArgs.Temperature
+ };
+ request.ResponseFormat = AudioRequest.ResponseFormats.VerboseJson;
+ MultipartFormDataContent content;
+ using (var memoryStream = new MemoryStream())
+ {
+ audioStream.CopyTo(memoryStream);
+ content = new MultipartFormDataContent
+ {
+ { new StringContent(request.Model), "model" },
+ { new StringContent(request.ResponseFormat), "response_format" },
+ { new ByteArrayContent(memoryStream.ToArray()), "file", filename }
+ };
+ if (!string.IsNullOrEmpty(request.Language))
+ content.Add(new StringContent(request.Language), "language");
+ if (!string.IsNullOrEmpty(request.Prompt))
+ content.Add(new StringContent(request.Prompt), "prompt");
+ if (request.Temperature != 0)
+ content.Add(new StringContent(request.Temperature.ToString()), "temperature");
+ }
+ return await HttpPost(Url, content);
+ }
- ///
- /// Gets the transcription of the audio file, with full metadata
- ///
- /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- /// A string of the transcribed text
- public async Task<AudioResultVerbose> GetWithDetailsAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null)
- {
- using (var fileStream = File.OpenRead(audioFilePath))
- {
- return await GetWithDetailsAsync(fileStream, Path.GetFileName(audioFilePath), language, prompt, temperature);
- }
- }
+ ///
+ /// Gets the transcription of the audio file, with full metadata
+ ///
+ /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// A string of the transcribed text
+ public async Task<AudioResultVerbose> GetWithDetailsAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null)
+ {
+ using (var fileStream = File.OpenRead(audioFilePath))
+ {
+ return await GetWithDetailsAsync(fileStream, Path.GetFileName(audioFilePath), language, prompt, temperature);
+ }
+ }
+ /// <summary>
+ /// Gets the transcription of the audio file as verbose JSON, including word- and segment-level timestamps.
+ /// </summary>
+ /// <param name="audioFilePath">The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
+ /// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
+ /// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
+ /// <param name="responseFormat">The format of the response. Defaults to verbose_json, which is required for timestamps.</param>
+ /// <param name="timestamps">The timestamp granularities to request, either or both of "word" and "segment". Defaults to both.</param>
+ /// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
+ /// <returns>The raw response body as a string (verbose JSON by default)</returns>
+ public async Task<string> GetWithDetailsAsync2(string audioFilePath, string language = null, string prompt = null, string responseFormat = null, List<string> timestamps = null, double? temperature = null)
+ {
+ using (var fileStream = File.OpenRead(audioFilePath))
+ {
+ var request = new AudioRequest()
+ {
+ Language = language ?? DefaultRequestArgs.Language,
+ Model = DefaultRequestArgs.Model,
+ Prompt = prompt ?? DefaultRequestArgs.Prompt,
+ Temperature = temperature ?? DefaultRequestArgs.Temperature,
+ ResponseFormat = responseFormat ?? AudioRequest.ResponseFormats.VerboseJson
+ };
+ MultipartFormDataContent content;
+ using (var memoryStream = new MemoryStream())
+ {
+ fileStream.CopyTo(memoryStream);
+ content = new MultipartFormDataContent
+ {
+ { new StringContent(request.Model), "model" },
+ { new StringContent(request.ResponseFormat), "response_format" },
+ { new ByteArrayContent(memoryStream.ToArray()), "file", Path.GetFileName(audioFilePath) }
+ };
+ // The API expects one multipart field named "timestamp_granularities[]" per requested granularity.
+ foreach (var granularity in timestamps ?? DefaultRequestArgs.TimestampGranularities)
+ {
+ content.Add(new StringContent(granularity), "timestamp_granularities[]");
+ }
+ if (!string.IsNullOrEmpty(request.Language))
+ content.Add(new StringContent(request.Language), "language");
+ if (!string.IsNullOrEmpty(request.Prompt))
+ content.Add(new StringContent(request.Prompt), "prompt");
+ if (request.Temperature != 0)
+ content.Add(new StringContent(request.Temperature.ToString()), "temperature");
+ }
+ return await HttpGetContent(Url, HttpMethod.Post, content);
+ }
+ }
- ///
- /// Gets the transcription of the audio stream, in the specified format
- ///
- /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
- /// The format of the response. Suggested value are or . For text and Json formats, try or instead.
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- /// A string of the transcribed text
- public async Task<string> GetAsFormatAsync(Stream audioStream, string filename, string responseFormat, string language = null, string prompt = null, double? temperature = null)
- {
- var request = new AudioRequest()
- {
- Language = language ?? DefaultRequestArgs.Language,
- Model = DefaultRequestArgs.Model,
- Prompt = prompt ?? DefaultRequestArgs.Prompt,
- Temperature = temperature ?? DefaultRequestArgs.Temperature,
- ResponseFormat = responseFormat ?? DefaultRequestArgs.ResponseFormat
- };
- MultipartFormDataContent content;
- using (var memoryStream = new MemoryStream())
- {
- audioStream.CopyTo(memoryStream);
- content = new MultipartFormDataContent
- {
- { new StringContent(request.Model), "model" },
- { new StringContent(request.ResponseFormat), "response_format" },
- { new ByteArrayContent(memoryStream.ToArray()), "file", filename }
- };
- if (!string.IsNullOrEmpty(request.Language))
- content.Add(new StringContent(request.Language), "language");
- if (!string.IsNullOrEmpty(request.Prompt))
- content.Add(new StringContent(request.Prompt), "prompt");
- if (request.Temperature != 0)
- content.Add(new StringContent(request.Temperature.ToString()), "temperature");
- }
- return await HttpGetContent(Url, HttpMethod.Post, content);
- }
+ ///
+ /// Gets the transcription of the audio stream, in the specified format
+ ///
+ /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
+ /// The format of the response. Suggested value are or . For text and Json formats, try or instead.
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// A string of the transcribed text
+ public async Task<string> GetAsFormatAsync(Stream audioStream, string filename, string responseFormat, string language = null, string prompt = null, double? temperature = null)
+ {
+ var request = new AudioRequest()
+ {
+ Language = language ?? DefaultRequestArgs.Language,
+ Model = DefaultRequestArgs.Model,
+ Prompt = prompt ?? DefaultRequestArgs.Prompt,
+ Temperature = temperature ?? DefaultRequestArgs.Temperature,
+ ResponseFormat = responseFormat ?? DefaultRequestArgs.ResponseFormat
+ };
+ MultipartFormDataContent content;
+ using (var memoryStream = new MemoryStream())
+ {
+ audioStream.CopyTo(memoryStream);
+ content = new MultipartFormDataContent
+ {
+ { new StringContent(request.Model), "model" },
+ { new StringContent(request.ResponseFormat), "response_format" },
+ { new ByteArrayContent(memoryStream.ToArray()), "file", filename }
+ };
+ if (!string.IsNullOrEmpty(request.Language))
+ content.Add(new StringContent(request.Language), "language");
+ if (!string.IsNullOrEmpty(request.Prompt))
+ content.Add(new StringContent(request.Prompt), "prompt");
+ if (request.Temperature != 0)
+ content.Add(new StringContent(request.Temperature.ToString()), "temperature");
+ }
+ return await HttpGetContent(Url, HttpMethod.Post, content);
+ }
- ///
- /// Gets the transcription of the audio file, in the specified format
- ///
- /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- /// The format of the response. Suggested value are or . For text and Json formats, try or instead.
- /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
- /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
- /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- /// A string of the transcribed text
- public async Task<string> GetAsFormatAsync(string audioFilePath, string responseFormat, string language = null, string prompt = null, double? temperature = null)
- {
- using (var fileStream = File.OpenRead(audioFilePath))
- {
- return await GetAsFormatAsync(fileStream, Path.GetFileName(audioFilePath), responseFormat, language, prompt, temperature);
- }
- }
- }
+ ///
+ /// Gets the transcription of the audio file, in the specified format
+ ///
+ /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ /// The format of the response. Suggested value are or . For text and Json formats, try or instead.
+ /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+ /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+ /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// A string of the transcribed text
+ public async Task<string> GetAsFormatAsync(string audioFilePath, string responseFormat, string language = null, string prompt = null, double? temperature = null)
+ {
+ using (var fileStream = File.OpenRead(audioFilePath))
+ {
+ return await GetAsFormatAsync(fileStream, Path.GetFileName(audioFilePath), responseFormat, language, prompt, temperature);
+ }
+ }
+ }
}
diff --git a/OpenAI_API/EndpointBase.cs b/OpenAI_API/EndpointBase.cs
index d981c7e..389a1ad 100644
--- a/OpenAI_API/EndpointBase.cs
+++ b/OpenAI_API/EndpointBase.cs
@@ -125,7 +125,7 @@ private async Task HttpRequestRaw(string url = null, HttpMe
}
else
{
- string jsonContent = JsonConvert.SerializeObject(postData, new JsonSerializerSettings() { NullValueHandling = NullValueHandling.Ignore });
+ string jsonContent = JsonConvert.SerializeObject(postData, new JsonSerializerSettings() { NullValueHandling = NullValueHandling.Ignore, TypeNameHandling = TypeNameHandling.None });
var stringContent = new StringContent(jsonContent, UnicodeEncoding.UTF8, "application/json");
req.Content = stringContent;
}
diff --git a/README.md b/README.md
index 2081699..9f0b18d 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,11 @@
+
Microsoft reached out to me about transitioning this library into a new official C# OpenAI library and now it's ready to go! Starting with [v2.0.0-beta.3](https://www.nuget.org/packages/OpenAI/2.0.0-beta.3), the official library now has full coverage and will stay up-to-date. More details in the blog post here: https://devblogs.microsoft.com/dotnet/openai-dotnet-library
This github repo will remain here to document my original version of the library through [version 1.11, which is still available on Nuget as well](https://www.nuget.org/packages/OpenAI/1.11.0).
🎉
+
+
# C#/.NET SDK for accessing the OpenAI APIs, including GPT-3.5/4, GPT-3.5/4-Turbo, and DALL-E 2/3
A simple C# .NET wrapper library to use with OpenAI's API. More context [on my blog](https://rogerpincombe.com/openai-dotnet-api). **This is my original unofficial wrapper library around the OpenAI API.**
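+
+As a quick example of the new word-level timestamp support, here is a minimal sketch (this assumes you access the endpoint via `api.Transcriptions`; `GetWithDetailsAsync2` returns the raw verbose_json response as a string, which you can deserialize into `AudioResultVerbose` with Newtonsoft.Json):
+
+```csharp
+using System;
+using Newtonsoft.Json;
+using OpenAI_API;
+using OpenAI_API.Audio;
+
+// Request a verbose_json transcription with word and segment timestamps (the default granularities).
+var api = new OpenAIAPI("YOUR_API_KEY");
+string json = await api.Transcriptions.GetWithDetailsAsync2("sample.mp3", language: "en");
+
+// Deserialize the raw response and print each word with its start/end time in seconds.
+var result = JsonConvert.DeserializeObject<AudioResultVerbose>(json);
+foreach (var word in result.words)
+{
+    Console.WriteLine($"{word.word}: {word.start:0.00}s - {word.end:0.00}s");
+}
+```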