@@ -1559,69 +1559,6 @@ Liara's AI API, audio input with the format <Importan
To use text prompts together with audio input in the OpenAI SDK, you can use the following code and commands:
<HighlightTabs
tabs={[
- {
- label: "cURL",
- language: "bash",
- code: `#!/bin/bash
- # create a file named command.sh and run with \`bash command.sh liara-song.mp3\`
-
- if [ $# -eq 0 ]; then
- echo "Usage: $0 <audio_file.mp3>"
- echo "Example: $0 liara-song.mp3"
- exit 1
- fi
-
- AUDIO_FILE="$1"
-
- if [ ! -f "$AUDIO_FILE" ]; then
- echo "Error: Audio file '$AUDIO_FILE' not found"
- exit 1
- fi
-
- echo "Encoding audio file to base64..."
-
- AUDIO_BASE64=$(base64 -w 0 "$AUDIO_FILE")
-
- echo "Creating JSON payload..."
-
- cat > /tmp/payload.json << EOF
- {
- "model": "google/gemini-2.5-flash",
- "messages": [
- {
- "role": "user",
- "content": [
- {
- "type": "text",
- "text": "What is the audio saying?"
- },
- {
- "type": "input_audio",
- "input_audio": {
- "data": "$AUDIO_BASE64",
- "format": "mp3"
- }
- }
- ]
- }
- ]
- }
- EOF
-
- echo "Sending request to API..."
-
- curl -X POST "<baseUrl>/chat/completions" \\
- -H "Content-Type: application/json" \\
- -H "Authorization: Bearer <LIARA_API_KEY>" \\
- -d @/tmp/payload.json
-
- echo ""
- echo "Cleaning up temporary files..."
-
- rm /tmp/payload.json
-
- echo "Done!"`,
- },
{
label: "JavaScript",
icon: <PlatformIcon platform="nodejs" style={{ width: 32, height: 32 }} />,
@@ -1807,6 +1744,157 @@ class Program
}
`,
},
+ {
+ label: "Go",
+ icon: <PlatformIcon platform="go" style={{ width: 32, height: 32 }} />,
+ language: "go",
+ code: `// go get github.com/openai/openai-go/v2
+ // go get github.com/openai/openai-go/v2/option
+
+ package main
+
+ import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+
+ openai "github.com/openai/openai-go/v2"
+ "github.com/openai/openai-go/v2/option"
+ )
+
+ type InputAudio struct {
+ Data string \`json:"data"\`
+ Format string \`json:"format"\`
+ }
+
+ type ContentItem struct {
+ Type string \`json:"type"\`
+ Text string \`json:"text,omitempty"\`
+ InputAudio *InputAudio \`json:"input_audio,omitempty"\`
+ }
+
+ type Message struct {
+ Role string \`json:"role"\`
+ Content []ContentItem \`json:"content"\`
+ }
+
+ func main() {
+ audioPath := "liara-song.mp3"
+
+ audioBytes, err := os.ReadFile(audioPath)
+ if err != nil {
+ log.Fatalf("Failed to read audio file: %v", err)
+ }
+
+ base64Audio := base64.StdEncoding.EncodeToString(audioBytes)
+
+ client := openai.NewClient(
+ option.WithAPIKey("<LIARA_API_KEY>"),
+ option.WithBaseURL("<BASE_URL>"),
+ )
+
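+ // the message mirrors OpenAI's multimodal chat format: a text part plus an input_audio part carrying the base64-encoded audio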
+ messages := []Message{
+ {
+ Role: "user",
+ Content: []ContentItem{
+ {Type: "text", Text: "What is the audio saying?"},
+ {Type: "input_audio", InputAudio: &InputAudio{
+ Data: base64Audio,
+ Format: "mp3",
+ }},
+ },
+ },
+ }
+
+ params := map[string]interface{}{
+ "model": "google/gemini-2.5-flash",
+ "messages": messages,
+ }
+
+ var result map[string]interface{}
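+ // client.Post is the SDK's generic request helper: it marshals params to JSON and decodes the response into result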
+ if err := client.Post(context.Background(), "/v1/chat/completions", params, &result); err != nil {
+ log.Fatalf("API request failed: %v", err)
+ }
+
+ if choices, ok := result["choices"].([]interface{}); ok && len(choices) > 0 {
+ if msg, ok := choices[0].(map[string]interface{})["message"].(map[string]interface{}); ok {
+ content, _ := json.MarshalIndent(msg["content"], "", " ")
+ fmt.Println("Model Response:", string(content))
+ } else {
+ fmt.Printf("Unexpected message format: %+v\\n", choices[0])
+ }
+ } else {
+ fmt.Printf("No choices returned. Full response: %+v\\n", result)
+ }
+ }
+ `,
+ },
+ {
+ label: "cURL",
+ language: "bash",
+ code: `#!/bin/bash
+ # create a file named command.sh and run with \`bash command.sh liara-song.mp3\`
+
+ if [ $# -eq 0 ]; then
+ echo "Usage: $0 <audio_file.mp3>"
+ echo "Example: $0 liara-song.mp3"
+ exit 1
+ fi
+
+ AUDIO_FILE="$1"
+
+ if [ ! -f "$AUDIO_FILE" ]; then
+ echo "Error: Audio file '$AUDIO_FILE' not found"
+ exit 1
+ fi
+
+ echo "Encoding audio file to base64..."
+
+ AUDIO_BASE64=$(base64 -w 0 "$AUDIO_FILE")
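+ # note: "-w 0" (disable line wrapping) is a GNU coreutils flag; on macOS use: AUDIO_BASE64=$(base64 -i "$AUDIO_FILE")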
+
+ echo "Creating JSON payload..."
+
+ cat > /tmp/payload.json << EOF
+ {
+ "model": "google/gemini-2.5-flash",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What is the audio saying?"
+ },
+ {
+ "type": "input_audio",
+ "input_audio": {
+ "data": "$AUDIO_BASE64",
+ "format": "mp3"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ EOF
+
+ echo "Sending request to API..."
+
+ curl -X POST "<baseUrl>/chat/completions" \\
+ -H "Content-Type: application/json" \\
+ -H "Authorization: Bearer <LIARA_API_KEY>" \\
+ -d @/tmp/payload.json
+
+ echo ""
+ echo "Cleaning up temporary files..."
+
+ rm /tmp/payload.json
+
+ echo "Done!"`,
+ },
]}
/>
</>,