from llama_index.core.callbacks import CallbackManager
from llama_index.core.service_context import ServiceContext
import chainlit as cl


@cl.on_chat_start
async def start():
    service_context = ServiceContext.from_defaults(
        callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()])
    )
    # use the service context to create the predictor
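
As a minimal sketch of what the trailing comment points at, one way to "use the service context" is to pass it when building an index, so the Chainlit callback handler traces indexing and query calls. The "./data" directory and the choice of a vector index are assumptions for illustration, not part of the original snippet:

import chainlit as cl
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.callbacks import CallbackManager
from llama_index.core.service_context import ServiceContext


@cl.on_chat_start
async def start():
    # Same wiring as above: route LlamaIndex callbacks to the Chainlit UI.
    service_context = ServiceContext.from_defaults(
        callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()])
    )
    # Assumed setup: documents live in a local "./data" directory.
    documents = SimpleDirectoryReader("./data").load_data()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
    # Keep the query engine in the user session so later messages can reuse it.
    cl.user_session.set("query_engine", index.as_query_engine())

With this in place, a matching @cl.on_message handler could fetch the query engine from the session and run user questions through it, with each step surfaced in the Chainlit UI by the callback handler.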