nginx
Combining ollama with nginx lets you use llama from a web-based interface:
index.html
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8" />
  <title>Ollama Chat</title>
  <style>
    body { font-family: sans-serif; margin: 2rem; max-width: 700px; }
    textarea { width: 100%; height: 100px; }
    pre { background: #eee; padding: 1em; white-space: pre-wrap; }
  </style>
</head>
<body>
  <h1>🧠 Chat with LLaMA (via Ollama)</h1>
  <textarea id="prompt" placeholder="Say something..."></textarea><br/>
  <button onclick="send()">Send</button>
  <pre id="output"></pre>
  <script>
    async function send() {
      const prompt = document.getElementById("prompt").value;
      document.getElementById("output").textContent = "Thinking...";
      // POST to /api/generate; nginx proxies this path to the Ollama server
      const res = await fetch("/api/generate", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          model: "llama3",   // must match a model you have pulled with ollama
          prompt,
          stream: false      // wait for the complete reply in one JSON object
        })
      });
      const result = await res.json();
      document.getElementById("output").textContent = result.response;
    }
  </script>
</body>
</html>
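The send() above waits for the complete reply (stream: false). The /api/generate endpoint can also stream the reply as newline-delimited JSON objects, each carrying a partial response field. A minimal sketch of a streaming variant of send() (for simplicity it assumes each network chunk ends on a line boundary, which usually holds but is not guaranteed):

async function send() {
  const prompt = document.getElementById("prompt").value;
  const output = document.getElementById("output");
  output.textContent = "";
  const res = await fetch("/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model: "llama3", prompt, stream: true })
  });
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // each non-empty line is one JSON object: { "response": "...", "done": ... }
    for (const line of decoder.decode(value, { stream: true }).split("\n")) {
      if (line.trim()) output.textContent += JSON.parse(line).response;
    }
  }
}

This shows partial output as it arrives instead of a "Thinking..." placeholder.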
The corresponding server block in nginx.conf:
server {
    listen 80;
    server_name your.domain.com;

    location / {
        root /var/www/html;
        index index.html;
    }

    location /api/ {
        # No URI part after the port: the original path (/api/generate) is
        # passed to Ollama unchanged. A trailing slash here would strip the
        # /api/ prefix and break the endpoint.
        proxy_pass http://localhost:11434;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
When you run ollama serve (or use ollama run), ollama starts a RESTful API server at http://localhost:11434, so that programs or front-end interfaces can interact with the model.
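For example, the same endpoint the page calls can be exercised directly. A minimal sketch for Node 18+ (whose built-in fetch is assumed; the file name test.mjs is arbitrary), run with node test.mjs:

// test.mjs -- call the Ollama API directly, bypassing nginx
const res = await fetch("http://localhost:11434/api/generate", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ model: "llama3", prompt: "Hello!", stream: false })
});
const data = await res.json();
console.log(data.response);   // the full reply as a single string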
So after running, for example, ollama run llama3.1:latest (adjust the model field in index.html to match the model you started, or vice versa) and serving index.html with nginx, you can open the page from the web server and converse with the llama language model.
If the model running under ollama supports vision (for example llava), index.html can be extended further so that the user can upload an image and discuss it with the AI model; the API accepts base64-encoded images in the images field of the request:
index_vision.html
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8" />
  <title>Ollama Chat + Image/File Upload</title>
  <style>
    body { font-family: sans-serif; max-width: 800px; margin: 2rem auto; }
    textarea { width: 100%; height: 80px; }
    .chatbox { white-space: pre-wrap; background: #f4f4f4; padding: 1rem; border-radius: 8px; margin-bottom: 1rem; }
    .file-label { margin-top: 0.5rem; display: block; font-weight: bold; }
  </style>
</head>
<body>
  <h2>Chat with LLaMA + Upload Image/Text</h2>
  <div class="chatbox" id="chatlog"></div>
  <textarea id="prompt" placeholder="Say something..."></textarea><br>
  <label class="file-label">Upload image (.png/.jpg) or text (.txt/.md):</label>
  <input type="file" id="fileInput" accept=".png,.jpg,.jpeg,.txt,.md"><br><br>
  <button onclick="send()">Send</button>
  <script>
    const chatlog = document.getElementById("chatlog");

    async function send() {
      const promptInput = document.getElementById("prompt").value.trim();
      const fileInput = document.getElementById("fileInput");
      if (!promptInput && fileInput.files.length === 0) return;
      let userMessage = promptInput || "[No prompt]";
      let fullPrompt = promptInput;
      const images = [];
      // File handling
      if (fileInput.files.length > 0) {
        const file = fileInput.files[0];
        const type = file.type;
        if (type.startsWith("image/")) {
          // Images go to the API's "images" field as raw base64,
          // not inline in the prompt text.
          images.push(await fileToBase64(file));
        } else if (type === "text/plain" || file.name.endsWith(".md")) {
          const text = await file.text();
          fullPrompt += `\n\n[File contents follow:]\n${text}`;
        } else {
          appendChat("Unsupported file type.");
          return;
        }
      }
      appendChat("You:\n" + userMessage);
      appendChat("LLaMA is thinking...");
      const body = {
        model: "llava",   // a vision-capable model; text-only models cannot read images
        prompt: fullPrompt,
        stream: false
      };
      if (images.length > 0) body.images = images;
      const res = await fetch("/api/generate", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body)
      });
      const result = await res.json();
      updateLastBotReply("LLaMA:\n" + result.response);
    }

    function appendChat(text) {
      const div = document.createElement("div");
      div.className = "chatbox";
      div.textContent = text;
      chatlog.appendChild(div);
      window.scrollTo(0, document.body.scrollHeight);
    }

    // Replace the "LLaMA is thinking..." placeholder with the actual reply.
    function updateLastBotReply(text) {
      const boxes = document.getElementsByClassName("chatbox");
      boxes[boxes.length - 1].textContent = text;
    }

    // Read a File as a data URL and strip the "data:...;base64," prefix.
    function fileToBase64(file) {
      return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = () => resolve(reader.result.split(",")[1]);
        reader.onerror = reject;
        reader.readAsDataURL(file);
      });
    }
  </script>
</body>
</html>
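Note that this sketch assumes a vision-capable model has been pulled beforehand (e.g. ollama pull llava); a text-only model such as llama3 cannot interpret the uploaded image.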
Alternatively, ollama can be combined directly with https://www.openwebui.com/, which provides more advanced web-based chat features.