# imports used below (module level)
import json
import re

from langchain_core.messages import AIMessage, HumanMessage  # or the equivalent messages import used in the original module


def extract_overview_and_html(self, response_text):
    # Pull the plain-text overview and the HTML document out of the marker-delimited LLM response.
    overview = ""
    match_overview = re.search(r"OVERVIEW_START([\s\S]*?)OVERVIEW_END", response_text)
    if match_overview:
        overview = match_overview.group(1).strip()
    html_code = ""
    match_html = re.search(r"HTML_START([\s\S]*?)HTML_END", response_text)
    if match_html:
        html_code = match_html.group(1).strip()
    return overview, html_code


def generate_report(self, state, config):  # method name and signature assumed; the original snippet omits the def line
    template = config['configurable']['template']
    graph_data = [i.content for i in state['graph_data']]
    # Attach each section's data to its template entry.
    template = [{"section_data": graph_data[i], **sec} for i, sec in enumerate(template)]
    sections = '\n'.join('> {}. {}'.format(n, i['sectionTitle']) for n, i in enumerate(template, start=1))
    template_data_str = json.dumps(template, indent=2)
    prompt = f"""
You are a data analyst expert.
Task description:
- Produce a broad textual overview (as plain text) about what the data represents and what each visualization (chart/table) will depict.
- Produce an HTML document that contains the Plotly <script> plus all charts/tables described by each widget.
- For any chart types (as hinted by the 'widget_description'), choose the most appropriate Plotly visualization (donut pie chart, bar chart, table, etc.).
- After each chart in the HTML, include a short <p> or <div> element that briefly explains what that chart shows (the "detailed explanation").
- Embed Plotly directly so the file is self-contained (with external CDNs as needed).
- Do not place the overview inside the HTML. The overview and the HTML must be returned separately.
- Output format:
  - Wrap the textual overview in lines that say:
    ```
    OVERVIEW_START
    [Your plain-text overview here]
    OVERVIEW_END
    ```
  - Followed by:
    ```
    HTML_START
    HTML_END
    ```
  - Do not include any extra text outside these markers.
- Generate {len(template)} sections and {state['template_widgets']} from the following sections data:
{sections}
This template comprises multiple sections; each section contains some widgets, and each widget corresponds to one analysis chart/insight.
Template data:
{template_data_str}
"""
    llm_response = self.llm.invoke([HumanMessage(content=prompt)])
    response_text = llm_response.content
    overview_section, html_section = self.extract_overview_and_html(response_text)
    # Downstream consumers expect a fenced ```html block, so wrap the HTML if the model did not.
    if html_section.strip()[:7].lower() != '```html':
        html_section = "```html " + html_section + " ```"
    # calculate tokens
    state['token_usage']['prompt_tokens'] += self.token_calculator(prompt)
    state['token_usage']['completion_tokens'] += self.token_calculator(response_text)
    state['token_usage']['total_tokens'] += self.token_calculator(prompt) + self.token_calculator(response_text)
    # return {"messages": llm_response}
    return {"messages": AIMessage(content=html_section), "overview_data": overview_section}
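The marker-based parsing is the piece most worth checking in isolation. Below is a minimal, self-contained sketch (standard-library `re` only, no class context; the helper name `split_markers` and the sample response are hypothetical) that mirrors the regex logic of `extract_overview_and_html` and shows the split it produces on a toy LLM response.

import re

def split_markers(response_text):
    # Non-greedy capture between each sentinel pair; returns "" when a pair is missing.
    overview_match = re.search(r"OVERVIEW_START([\s\S]*?)OVERVIEW_END", response_text)
    html_match = re.search(r"HTML_START([\s\S]*?)HTML_END", response_text)
    overview = overview_match.group(1).strip() if overview_match else ""
    html_code = html_match.group(1).strip() if html_match else ""
    return overview, html_code

sample_response = """OVERVIEW_START
Quarterly sales by region, one donut chart per section.
OVERVIEW_END
HTML_START
<html>
  <head><script src="https://cdn.plot.ly/plotly-latest.min.js"></script></head>
  <body><div id="sales_donut"></div></body>
</html>
HTML_END"""

overview, html_code = split_markers(sample_response)
print(overview)    # Quarterly sales by region, one donut chart per section.
print(html_code)   # the <html>...</html> document, ready to be wrapped in ```html fences

Because the capture groups are non-greedy, only the first OVERVIEW/HTML pair is taken, which is exactly what the "Do not include any extra text outside these markers" instruction in the prompt relies on.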