"""
农技智能问答
基于大模型的农业技术知识问答应用
"""
import json
import httpx
import streamlit as st
from config import CHAT_API_URL, CHAT_MODEL, HEADERS
# ─── Page Config ────────────────────────────────────────────────────────────
# Must run before any other Streamlit call; centered layout with the sidebar
# collapsed keeps the Q&A flow front and center.
_PAGE_OPTIONS = {
    "page_title": "农技问答",
    "page_icon": "🌾",
    "layout": "centered",
    "initial_sidebar_state": "collapsed",
}
st.set_page_config(**_PAGE_OPTIONS)
# ─── Custom CSS ──────────────────────────────────────────────────────────────
# Placeholder for page-level CSS overrides (currently empty).
st.markdown("\n", unsafe_allow_html=True)
# ─── Quick Questions ─────────────────────────────────────────────────────────
# Canned prompts surfaced as one-tap shortcuts below the header; order is the
# on-screen display order.
QUICK_QUESTIONS = [
    "水稻稻瘟病怎么防治?",      # rice blast control
    "小麦锈病怎么处理?",        # wheat rust treatment
    "玉米什么时候浇水最合适?",  # maize irrigation timing
    "种榴莲需要注意什么?",      # durian cultivation tips
]
# ─── Header ──────────────────────────────────────────────────────────────────
st.markdown("\n", unsafe_allow_html=True)
# ─── Quick Questions ─────────────────────────────────────────────────────────
# NOTE(review): the original chip HTML was garbled (string literals split
# across lines — a syntax error); reconstructed here as a single joined
# span row. Confirm the intended chip markup/CSS class names.
chips_html = "".join(
    f'<span class="chip">{q}</span>' for q in QUICK_QUESTIONS
)
st.markdown(f'<div class="chips">{chips_html}</div>', unsafe_allow_html=True)
# Handle chip clicks via native buttons (one column per question). The HTML
# chips above are decorative; these buttons are what actually capture clicks.
cols = st.columns(len(QUICK_QUESTIONS))
for i, q in enumerate(QUICK_QUESTIONS):
    with cols[i]:
        if st.button(q, key=f"chip_{q}", use_container_width=True):
            # Stash the chosen question so the text area picks it up
            # on the rerun that follows.
            st.session_state.user_input = q
            st.rerun()
# ─── Input ───────────────────────────────────────────────────────────────────
# NOTE(review): the trailing markdown string here was garbled (single-quoted
# literal split across lines — a syntax error); reconstructed as an empty
# wrapper. Confirm the intended closing markup.
st.markdown('', unsafe_allow_html=True)
user_input = st.text_area(
    "请输入您的问题",
    # Pre-fill with a chip selection stashed in session state, if any.
    value=st.session_state.get("user_input", ""),
    height=110,
    placeholder="例如:水稻稻瘟病怎么防治?",
    label_visibility="collapsed",
)
col1, col2 = st.columns([1, 6])
with col1:
    # `submitted` drives the request/stream section below.
    submitted = st.button("发送", use_container_width=True)
st.markdown('', unsafe_allow_html=True)
# ─── Settings Panel ──────────────────────────────────────────────────────────
# Sampling controls for the chat request; values feed the payload below.
with st.expander("⚙️ 模型设置"):
    settings_cols = st.columns(3)
    with settings_cols[0]:
        # Sampling temperature: 0.0–1.0, default 0.7, step 0.1.
        temperature = st.slider("Temperature", 0.0, 1.0, 0.7, 0.1)
    with settings_cols[1]:
        # Nucleus-sampling cutoff: 0.0–1.0, default 0.8, step 0.1.
        top_p = st.slider("Top P", 0.0, 1.0, 0.8, 0.1)
    with settings_cols[2]:
        # Toggles both requesting and displaying the reasoning stream.
        enable_thinking = st.checkbox("显示推理过程", value=True)
# ─── Request & Stream ────────────────────────────────────────────────────────
# Sends an OpenAI-compatible streaming chat request and renders the reasoning
# and answer deltas live as SSE frames arrive.
# NOTE(review): the original placeholder/error HTML was garbled (f-strings
# split across lines — syntax errors); reconstructed with plain markdown and
# st.error. Confirm against the intended card markup.
if submitted and user_input.strip():
    try:
        payload = {
            "model": CHAT_MODEL,
            "messages": [{"role": "user", "content": user_input.strip()}],
            "temperature": temperature,
            "top_p": top_p,
            "presence_penalty": 1.5,
            # Server-side switch for emitting reasoning_content deltas.
            "chat_template_kwargs": {"enable_thinking": enable_thinking},
            "stream": True,
        }
        thinking_placeholder = st.empty()
        answer_placeholder = st.empty()
        full_reasoning = ""
        full_content = ""
        with httpx.stream(
            "POST", CHAT_API_URL, headers=HEADERS, json=payload, timeout=120
        ) as resp:
            resp.raise_for_status()
            for line in resp.iter_lines():
                # SSE frames look like "data: {...}"; skip keep-alives etc.
                if not line.startswith("data: "):
                    continue
                data_str = line[6:]
                if data_str == "[DONE]":
                    break
                try:
                    chunk = json.loads(data_str)
                except json.JSONDecodeError:
                    # Tolerate malformed/partial frames rather than aborting.
                    continue
                delta = chunk.get("choices", [{}])[0].get("delta", {})
                # `or ""` also guards against an explicit JSON null value.
                reasoning_piece = delta.get("reasoning_content") or ""
                if reasoning_piece:
                    full_reasoning += reasoning_piece
                    if enable_thinking:
                        thinking_placeholder.markdown(
                            f"**正在思考...**\n\n{full_reasoning}"
                        )
                content_piece = delta.get("content") or ""
                if content_piece:
                    full_content += content_piece
                    answer_placeholder.markdown(
                        f"**🌱 回答**\n\n{full_content}"
                    )
        # Once streaming finishes, collapse the live reasoning into an
        # expander so the final answer stands alone.
        if full_reasoning and enable_thinking:
            thinking_placeholder.empty()
            with st.expander("查看推理过程"):
                st.markdown(full_reasoning)
    except httpx.HTTPStatusError as e:
        # Non-2xx from the API: surface status and body to the user.
        st.error(f"请求失败 (HTTP {e.response.status_code}): {e.response.text}")
    except Exception as e:
        # Top-level UI boundary: report anything else (network, JSON, ...).
        st.error(f"请求异常: {e}")
# ─── Footer ───────────────────────────────────────────────────────────────────
# Placeholder footer markup (currently empty).
st.markdown("\n", unsafe_allow_html=True)