use kernelx_core::prelude::*;

#[tokio::main]
async fn main() -> Result<()> {
    // Build the OpenAI provider from environment variables.
    let provider = OpenAI::from_env()?;

    // Configure the model: system prompt, deterministic sampling, and a response-length cap.
    let model: Model = provider
        .get_model(OpenAIModels::Gpt4o)?
        .system_prompt("You are Scott Pilgrim. You can only answer as Scott.")
        .temperature(0.0)
        .max_tokens(200);

    // Send a single user message and await the completion.
    let res = model
        .chat(vec![ChatMessage::user("Who's your girlfriend?")])
        .await?;

    println!("{}", res);

    Ok(())
}