[Chore/Refactor] Use centralized naive_utc_now for UTC datetime operations (#24352)

commit da9af7b547 (parent 295b47cbff)
Author: -LAN- <laipz8200@outlook.com>
Date: 2025-08-22 23:53:05 +08:00
Committed via GitHub
Signed-off-by: -LAN- <laipz8200@outlook.com>

34 changed files with 153 additions and 150 deletions
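Note: the new helper itself is not part of this diff. A minimal sketch of what `libs/datetime_utils.naive_utc_now` presumably looks like, inferred from the inline expression it replaces:

```python
# libs/datetime_utils.py (sketch only; the actual module is not shown in this diff)
from datetime import UTC, datetime


def naive_utc_now() -> datetime:
    """Return the current UTC time as a naive datetime (tzinfo stripped).

    Centralizes the pattern this commit removes from call sites:
    datetime.now(UTC).replace(tzinfo=None)
    """
    return datetime.now(UTC).replace(tzinfo=None)
```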

View File

@@ -50,6 +50,7 @@ from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution
 from core.workflow.nodes import NodeType
 from core.workflow.nodes.tool.entities import ToolNodeData
 from core.workflow.workflow_type_encoder import WorkflowRuntimeTypeConverter
+from libs.datetime_utils import naive_utc_now
 from models import (
     Account,
     CreatorUserRole,
@@ -399,7 +400,7 @@ class WorkflowResponseConverter:
                 if event.error is None
                 else WorkflowNodeExecutionStatus.FAILED,
                 error=None,
-                elapsed_time=(datetime.now(UTC).replace(tzinfo=None) - event.start_at).total_seconds(),
+                elapsed_time=(naive_utc_now() - event.start_at).total_seconds(),
                 total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0,
                 execution_metadata=event.metadata,
                 finished_at=int(time.time()),
@@ -478,7 +479,7 @@ class WorkflowResponseConverter:
                 if event.error is None
                 else WorkflowNodeExecutionStatus.FAILED,
                 error=None,
-                elapsed_time=(datetime.now(UTC).replace(tzinfo=None) - event.start_at).total_seconds(),
+                elapsed_time=(naive_utc_now() - event.start_at).total_seconds(),
                 total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0,
                 execution_metadata=event.metadata,
                 finished_at=int(time.time()),
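The naive result matters for these `elapsed_time` computations: `event.start_at` is a naive datetime, and Python raises `TypeError` when subtracting across naive and aware values. A standalone illustration (not from the diff):

```python
from datetime import UTC, datetime

start_at = datetime.now(UTC).replace(tzinfo=None)  # naive, like event.start_at

try:
    datetime.now(UTC) - start_at  # aware minus naive
except TypeError as exc:
    print(exc)  # can't subtract offset-naive and offset-aware datetimes

# Both operands naive, as the converter now guarantees via naive_utc_now():
elapsed = (datetime.now(UTC).replace(tzinfo=None) - start_at).total_seconds()
print(f"elapsed: {elapsed:.6f}s")
```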

View File

@@ -1,4 +1,3 @@
-import datetime
 import json
 import logging
 from collections import defaultdict
@@ -29,6 +28,7 @@ from core.model_runtime.model_providers.__base.ai_model import AIModel
 from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
 from core.plugin.entities.plugin import ModelProviderID
 from extensions.ext_database import db
+from libs.datetime_utils import naive_utc_now
 from models.provider import (
     LoadBalancingModelConfig,
     Provider,
@@ -261,7 +261,7 @@ class ProviderConfiguration(BaseModel):
         if provider_record:
             provider_record.encrypted_config = json.dumps(credentials)
             provider_record.is_valid = True
-            provider_record.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            provider_record.updated_at = naive_utc_now()
             db.session.commit()
         else:
             provider_record = Provider()
@@ -426,7 +426,7 @@ class ProviderConfiguration(BaseModel):
         if provider_model_record:
             provider_model_record.encrypted_config = json.dumps(credentials)
             provider_model_record.is_valid = True
-            provider_model_record.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            provider_model_record.updated_at = naive_utc_now()
             db.session.commit()
         else:
             provider_model_record = ProviderModel()
@@ -501,7 +501,7 @@ class ProviderConfiguration(BaseModel):
         if model_setting:
             model_setting.enabled = True
-            model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            model_setting.updated_at = naive_utc_now()
             db.session.commit()
         else:
             model_setting = ProviderModelSetting()
@@ -526,7 +526,7 @@ class ProviderConfiguration(BaseModel):
         if model_setting:
             model_setting.enabled = False
-            model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            model_setting.updated_at = naive_utc_now()
             db.session.commit()
         else:
             model_setting = ProviderModelSetting()
@@ -599,7 +599,7 @@ class ProviderConfiguration(BaseModel):
         if model_setting:
             model_setting.load_balancing_enabled = True
-            model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            model_setting.updated_at = naive_utc_now()
             db.session.commit()
         else:
             model_setting = ProviderModelSetting()
@@ -638,7 +638,7 @@ class ProviderConfiguration(BaseModel):
         if model_setting:
             model_setting.load_balancing_enabled = False
-            model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            model_setting.updated_at = naive_utc_now()
             db.session.commit()
         else:
             model_setting = ProviderModelSetting()
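Since every `updated_at` assignment now goes through one function, the same callable can also back column defaults. A hypothetical sketch (the model definitions are not part of this diff; `ExampleRecord` is invented for illustration):

```python
from datetime import datetime

from sqlalchemy import DateTime
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

from libs.datetime_utils import naive_utc_now


class Base(DeclarativeBase):
    pass


class ExampleRecord(Base):  # hypothetical model, not from this commit
    __tablename__ = "example_record"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Pass the callable itself (no parentheses) so SQLAlchemy evaluates it
    # per INSERT/UPDATE rather than once at class-definition time.
    updated_at: Mapped[datetime] = mapped_column(
        DateTime, default=naive_utc_now, onupdate=naive_utc_now
    )
```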

View File

@@ -1,5 +1,4 @@
 import concurrent.futures
-import datetime
 import json
 import logging
 import re
@@ -34,6 +33,7 @@ from extensions.ext_database import db
 from extensions.ext_redis import redis_client
 from extensions.ext_storage import storage
 from libs import helper
+from libs.datetime_utils import naive_utc_now
 from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegment
 from models.dataset import Document as DatasetDocument
 from models.model import UploadFile
@@ -87,7 +87,7 @@ class IndexingRunner:
         except ProviderTokenNotInitError as e:
             dataset_document.indexing_status = "error"
             dataset_document.error = str(e.description)
-            dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            dataset_document.stopped_at = naive_utc_now()
             db.session.commit()
         except ObjectDeletedError:
             logging.warning("Document deleted, document id: %s", dataset_document.id)
@@ -95,7 +95,7 @@
             logging.exception("consume document failed")
             dataset_document.indexing_status = "error"
             dataset_document.error = str(e)
-            dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            dataset_document.stopped_at = naive_utc_now()
             db.session.commit()

     def run_in_splitting_status(self, dataset_document: DatasetDocument):
@@ -150,13 +150,13 @@
         except ProviderTokenNotInitError as e:
             dataset_document.indexing_status = "error"
             dataset_document.error = str(e.description)
-            dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            dataset_document.stopped_at = naive_utc_now()
             db.session.commit()
         except Exception as e:
             logging.exception("consume document failed")
             dataset_document.indexing_status = "error"
             dataset_document.error = str(e)
-            dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            dataset_document.stopped_at = naive_utc_now()
             db.session.commit()

     def run_in_indexing_status(self, dataset_document: DatasetDocument):
@@ -225,13 +225,13 @@
         except ProviderTokenNotInitError as e:
             dataset_document.indexing_status = "error"
             dataset_document.error = str(e.description)
-            dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            dataset_document.stopped_at = naive_utc_now()
             db.session.commit()
         except Exception as e:
             logging.exception("consume document failed")
             dataset_document.indexing_status = "error"
             dataset_document.error = str(e)
-            dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+            dataset_document.stopped_at = naive_utc_now()
             db.session.commit()

     def indexing_estimate(
@@ -400,7 +400,7 @@
             after_indexing_status="splitting",
             extra_update_params={
                 DatasetDocument.word_count: sum(len(text_doc.page_content) for text_doc in text_docs),
-                DatasetDocument.parsing_completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                DatasetDocument.parsing_completed_at: naive_utc_now(),
             },
         )
@@ -583,7 +583,7 @@
             after_indexing_status="completed",
             extra_update_params={
                 DatasetDocument.tokens: tokens,
-                DatasetDocument.completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                DatasetDocument.completed_at: naive_utc_now(),
                 DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
                 DatasetDocument.error: None,
             },
@@ -608,7 +608,7 @@
                 {
                     DocumentSegment.status: "completed",
                     DocumentSegment.enabled: True,
-                    DocumentSegment.completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                    DocumentSegment.completed_at: naive_utc_now(),
                 }
             )
@@ -639,7 +639,7 @@
                 {
                     DocumentSegment.status: "completed",
                     DocumentSegment.enabled: True,
-                    DocumentSegment.completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                    DocumentSegment.completed_at: naive_utc_now(),
                 }
             )
@@ -727,7 +727,7 @@
         doc_store.add_documents(docs=documents, save_child=dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX)

         # update document status to indexing
-        cur_time = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
+        cur_time = naive_utc_now()
         self._update_document_index_status(
             document_id=dataset_document.id,
             after_indexing_status="indexing",
@@ -742,7 +742,7 @@
             dataset_document_id=dataset_document.id,
             update_params={
                 DocumentSegment.status: "indexing",
-                DocumentSegment.indexing_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                DocumentSegment.indexing_at: naive_utc_now(),
             },
         )
         pass
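One practical payoff of the centralization: tests can pin "now" by patching a single name. Caveat: call sites import the function with `from libs.datetime_utils import naive_utc_now`, so it must be patched where it is used, not where it is defined. A hypothetical pytest sketch (the runner's module path is an assumption):

```python
from datetime import datetime

import pytest


def test_stopped_at_is_deterministic(monkeypatch: pytest.MonkeyPatch) -> None:
    frozen = datetime(2025, 8, 22, 15, 53, 5)

    # Patching libs.datetime_utils.naive_utc_now would not rebind the name
    # already imported into the runner module; patch the use site instead.
    monkeypatch.setattr("core.indexing_runner.naive_utc_now", lambda: frozen)  # assumed path

    # ...drive the indexing failure path here, then:
    # assert dataset_document.stopped_at == frozen
```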

View File

@@ -1,6 +1,5 @@
 """Abstract interface for document loader implementations."""
-import datetime
 import logging
 import mimetypes
 import os
@@ -19,6 +18,7 @@ from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 from extensions.ext_database import db
 from extensions.ext_storage import storage
+from libs.datetime_utils import naive_utc_now
 from models.enums import CreatorUserRole
 from models.model import UploadFile
@@ -117,10 +117,10 @@ class WordExtractor(BaseExtractor):
             mime_type=mime_type or "",
             created_by=self.user_id,
             created_by_role=CreatorUserRole.ACCOUNT,
-            created_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+            created_at=naive_utc_now(),
             used=True,
             used_by=self.user_id,
-            used_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+            used_at=naive_utc_now(),
         )
         db.session.add(upload_file)

View File

@@ -6,12 +6,14 @@ implementation details like tenant_id, app_id, etc.
 """

 from collections.abc import Mapping
-from datetime import UTC, datetime
+from datetime import datetime
 from enum import StrEnum
 from typing import Any, Optional

 from pydantic import BaseModel, Field

+from libs.datetime_utils import naive_utc_now
+

 class WorkflowType(StrEnum):
     """
@@ -60,7 +62,7 @@ class WorkflowExecution(BaseModel):
         Calculate elapsed time in seconds.
         If workflow is not finished, use current time.
         """
-        end_time = self.finished_at or datetime.now(UTC).replace(tzinfo=None)
+        end_time = self.finished_at or naive_utc_now()
         return (end_time - self.started_at).total_seconds()

     @classmethod
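The `or` fallback in `elapsed_time` is safe because datetime instances are always truthy; only a `None` `finished_at` falls through to the live clock. A small standalone check of the same logic:

```python
from datetime import datetime

started_at = datetime(2025, 8, 22, 15, 0, 0)

# Finished run: elapsed time is fixed by finished_at.
finished_at = datetime(2025, 8, 22, 15, 0, 42)
end_time = finished_at or datetime(2025, 8, 22, 16, 0, 0)
assert (end_time - started_at).total_seconds() == 42.0

# Unfinished run: finished_at is None, so the stand-in "now" wins.
finished_at = None
end_time = finished_at or datetime(2025, 8, 22, 15, 1, 0)  # stand-in for naive_utc_now()
assert (end_time - started_at).total_seconds() == 60.0
```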

View File

@@ -1,5 +1,5 @@
 import uuid
-from datetime import UTC, datetime
+from datetime import datetime
 from enum import Enum
 from typing import Optional
@@ -7,6 +7,7 @@ from pydantic import BaseModel, Field

 from core.workflow.entities.node_entities import NodeRunResult
 from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus
+from libs.datetime_utils import naive_utc_now


 class RouteNodeState(BaseModel):
@@ -71,7 +72,7 @@ class RouteNodeState(BaseModel):
             raise Exception(f"Invalid route status {run_result.status}")

         self.node_run_result = run_result
-        self.finished_at = datetime.now(UTC).replace(tzinfo=None)
+        self.finished_at = naive_utc_now()


 class RuntimeRouteState(BaseModel):
@@ -89,7 +90,7 @@ class RuntimeRouteState(BaseModel):
         :param node_id: node id
         """
-        state = RouteNodeState(node_id=node_id, start_at=datetime.now(UTC).replace(tzinfo=None))
+        state = RouteNodeState(node_id=node_id, start_at=naive_utc_now())
         self.node_state_mapping[state.id] = state
         return state
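`create_node_state` stamps `start_at` explicitly; the same helper also fits pydantic's `default_factory` when a field should default to creation time. A hypothetical sketch (not how this file actually defines the model):

```python
from datetime import datetime

from pydantic import BaseModel, Field

from libs.datetime_utils import naive_utc_now


class ExampleNodeState(BaseModel):  # hypothetical, for illustration only
    node_id: str
    # default_factory is evaluated per instance; default=naive_utc_now()
    # would bake in a single timestamp at class-definition time.
    start_at: datetime = Field(default_factory=naive_utc_now)
```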

View File

@@ -6,7 +6,6 @@ import uuid
 from collections.abc import Generator, Mapping
 from concurrent.futures import ThreadPoolExecutor, wait
 from copy import copy, deepcopy
-from datetime import UTC, datetime
 from typing import Any, Optional, cast

 from flask import Flask, current_app
@@ -51,6 +50,7 @@ from core.workflow.nodes.base import BaseNode
 from core.workflow.nodes.end.end_stream_processor import EndStreamProcessor
 from core.workflow.nodes.enums import ErrorStrategy, FailBranchSourceHandle
 from core.workflow.nodes.event import RunCompletedEvent, RunRetrieverResourceEvent, RunStreamChunkEvent
+from libs.datetime_utils import naive_utc_now
 from libs.flask_utils import preserve_flask_contexts
 from models.enums import UserFrom
 from models.workflow import WorkflowType
@@ -640,7 +640,7 @@ class GraphEngine:
         while should_continue_retry and retries <= max_retries:
             try:
                 # run node
-                retry_start_at = datetime.now(UTC).replace(tzinfo=None)
+                retry_start_at = naive_utc_now()
                 # yield control to other threads
                 time.sleep(0.001)
                 event_stream = node.run()

View File

@@ -4,7 +4,7 @@ import time
 import uuid
 from collections.abc import Generator, Mapping, Sequence
 from concurrent.futures import Future, wait
-from datetime import UTC, datetime
+from datetime import datetime
 from queue import Empty, Queue
 from typing import TYPE_CHECKING, Any, Optional, cast
@@ -41,6 +41,7 @@ from core.workflow.nodes.enums import ErrorStrategy, NodeType
 from core.workflow.nodes.event import NodeEvent, RunCompletedEvent
 from core.workflow.nodes.iteration.entities import ErrorHandleMode, IterationNodeData
 from factories.variable_factory import build_segment
+from libs.datetime_utils import naive_utc_now
 from libs.flask_utils import preserve_flask_contexts

 from .exc import (
@@ -179,7 +180,7 @@ class IterationNode(BaseNode):
             thread_pool_id=self.thread_pool_id,
         )

-        start_at = datetime.now(UTC).replace(tzinfo=None)
+        start_at = naive_utc_now()

         yield IterationRunStartedEvent(
             iteration_id=self.id,
@@ -428,7 +429,7 @@
         """
         run single iteration
         """
-        iter_start_at = datetime.now(UTC).replace(tzinfo=None)
+        iter_start_at = naive_utc_now()

         try:
             rst = graph_engine.run()
@@ -505,7 +506,7 @@
             variable_pool.add([self.node_id, "index"], next_index)
             if next_index < len(iterator_list_value):
                 variable_pool.add([self.node_id, "item"], iterator_list_value[next_index])
-            duration = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()
+            duration = (naive_utc_now() - iter_start_at).total_seconds()
             iter_run_map[iteration_run_id] = duration
             yield IterationRunNextEvent(
                 iteration_id=self.id,
@@ -526,7 +527,7 @@
             if next_index < len(iterator_list_value):
                 variable_pool.add([self.node_id, "item"], iterator_list_value[next_index])

-            duration = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()
+            duration = (naive_utc_now() - iter_start_at).total_seconds()
             iter_run_map[iteration_run_id] = duration
             yield IterationRunNextEvent(
                 iteration_id=self.id,
@@ -602,7 +603,7 @@
             if next_index < len(iterator_list_value):
                 variable_pool.add([self.node_id, "item"], iterator_list_value[next_index])

-            duration = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()
+            duration = (naive_utc_now() - iter_start_at).total_seconds()
             iter_run_map[iteration_run_id] = duration
             yield IterationRunNextEvent(
                 iteration_id=self.id,

View File

@@ -1,5 +1,4 @@
 from collections.abc import Sequence
-from datetime import UTC, datetime
 from typing import Optional, cast

 from sqlalchemy import select, update
@@ -20,6 +19,7 @@ from core.variables.segments import ArrayAnySegment, ArrayFileSegment, FileSegme
 from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.enums import SystemVariableKey
 from core.workflow.nodes.llm.entities import ModelConfig
+from libs.datetime_utils import naive_utc_now
 from models import db
 from models.model import Conversation
 from models.provider import Provider, ProviderType
@@ -149,7 +149,7 @@ def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUs
         )
         .values(
             quota_used=Provider.quota_used + used_quota,
-            last_used=datetime.now(tz=UTC).replace(tzinfo=None),
+            last_used=naive_utc_now(),
         )
     )
     session.execute(stmt)

View File

@@ -2,7 +2,7 @@ import json
 import logging
 import time
 from collections.abc import Generator, Mapping, Sequence
-from datetime import UTC, datetime
+from datetime import datetime
 from typing import TYPE_CHECKING, Any, Literal, Optional, cast

 from configs import dify_config
@@ -36,6 +36,7 @@ from core.workflow.nodes.event import NodeEvent, RunCompletedEvent
 from core.workflow.nodes.loop.entities import LoopNodeData
 from core.workflow.utils.condition.processor import ConditionProcessor
 from factories.variable_factory import TypeMismatchError, build_segment_with_type
+from libs.datetime_utils import naive_utc_now

 if TYPE_CHECKING:
     from core.workflow.entities.variable_pool import VariablePool
@@ -143,7 +144,7 @@ class LoopNode(BaseNode):
             thread_pool_id=self.thread_pool_id,
         )

-        start_at = datetime.now(UTC).replace(tzinfo=None)
+        start_at = naive_utc_now()

         condition_processor = ConditionProcessor()
         # Start Loop event
@@ -171,7 +172,7 @@
         try:
             check_break_result = False
             for i in range(loop_count):
-                loop_start_time = datetime.now(UTC).replace(tzinfo=None)
+                loop_start_time = naive_utc_now()
                 # run single loop
                 loop_result = yield from self._run_single_loop(
                     graph_engine=graph_engine,
@@ -185,7 +186,7 @@
                     start_at=start_at,
                     inputs=inputs,
                 )
-                loop_end_time = datetime.now(UTC).replace(tzinfo=None)
+                loop_end_time = naive_utc_now()

                 single_loop_variable = {}
                 for key, selector in loop_variable_selectors.items():