diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py
index a6da64ea85b..2c095782e73 100644
--- a/aws_lambda_powertools/metrics/metrics.py
+++ b/aws_lambda_powertools/metrics/metrics.py
@@ -8,6 +8,8 @@
 
 logger = logging.getLogger(__name__)
 
+is_cold_start = True
+
 
 class Metrics(MetricManager):
     """Metrics create an EMF object with up to 100 metrics
@@ -80,7 +82,7 @@ def clear_metrics(self):
         self.metric_set.clear()
         self.dimension_set.clear()
 
-    def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None):
+    def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False):
         """Decorator to serialize and publish metrics at the end of a function execution.
 
         Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler).
@@ -107,10 +109,18 @@ def handler(event, context)
             Propagate error received
         """
 
+        # If lambda_handler is None, the decorator was called with parameters only;
+        # return a partial with capture_cold_start_metric bound, awaiting the handler.
+        if lambda_handler is None:
+            logger.debug("Decorator called with parameters")
+            return functools.partial(self.log_metrics, capture_cold_start_metric=capture_cold_start_metric)
+
         @functools.wraps(lambda_handler)
-        def decorate(*args, **kwargs):
+        def decorate(event, context):
             try:
-                response = lambda_handler(*args, **kwargs)
+                response = lambda_handler(event, context)
+                if capture_cold_start_metric:
+                    self.__add_cold_start_metric(context=context)
             finally:
                 metrics = self.serialize_metric_set()
                 self.clear_metrics()
@@ -120,3 +130,18 @@ def decorate(*args, **kwargs):
             return response
 
         return decorate
+
+    def __add_cold_start_metric(self, context: Any):
+        """Add cold start metric and function_name dimension
+
+        Parameters
+        ----------
+        context : Any
+            Lambda context
+        """
+        global is_cold_start
+        if is_cold_start:
+            logger.debug("Adding cold start metric and function_name dimension")
+            self.add_metric(name="ColdStart", value=1, unit="Count")
+            self.add_dimension(name="function_name", value=context.function_name)
+            is_cold_start = False
diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx
index 3bce811e88f..bd1de65f88c 100644
--- a/docs/content/core/metrics.mdx
+++ b/docs/content/core/metrics.mdx
@@ -147,6 +147,22 @@ print(json.dumps(your_metrics_object))
 # highlight-end
 ```
 
+## Capturing cold start metric
+
+You can capture cold start metrics automatically with `log_metrics` via the `capture_cold_start_metric` parameter.
+
+```python:title=lambda_handler.py
+from aws_lambda_powertools.metrics import Metrics, MetricUnit
+
+metrics = Metrics(service="ExampleService")
+
+@metrics.log_metrics(capture_cold_start_metric=True) # highlight-line
+def lambda_handler(evt, ctx):
+    ...
+```
+
+If it's a cold start invocation, this feature will add a metric named `ColdStart` with a value of `1`, and a dimension named `function_name` set to the name of the invoked Lambda function.
+
 ## Testing your code
 
 Use `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` env vars when unit testing your code to ensure metric namespace and dimension objects are created, and your code doesn't fail validation.
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 25024b3cfbb..7f3a57c9538 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -1,4 +1,5 @@
 import json
+from collections import namedtuple
 from typing import Any, Dict, List
 
 import pytest
@@ -585,3 +586,50 @@ def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namesp
 
     # THEN namespace should match the explicitly passed variable and not the env var
     assert expected["_aws"] == output["_aws"]
+
+
+def test_emit_cold_start_metric(capsys, namespace):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics()
+    my_metrics.add_namespace(**namespace)
+
+    # WHEN log_metrics is used with capture_cold_start_metric
+    @my_metrics.log_metrics(capture_cold_start_metric=True)
+    def lambda_handler(evt, context):
+        return True
+
+    LambdaContext = namedtuple("LambdaContext", "function_name")
+    lambda_handler({}, LambdaContext("example_fn"))
+
+    output = json.loads(capsys.readouterr().out.strip())
+
+    # THEN ColdStart metric and function_name dimension should be logged
+    assert output["ColdStart"] == 1
+    assert output["function_name"] == "example_fn"
+
+
+def test_emit_cold_start_metric_only_once(capsys, namespace, dimension, metric):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics()
+    my_metrics.add_namespace(**namespace)
+
+    # WHEN log_metrics is used with capture_cold_start_metric
+    # and handler is called more than once
+    @my_metrics.log_metrics(capture_cold_start_metric=True)
+    def lambda_handler(evt, context):
+        my_metrics.add_metric(**metric)
+        my_metrics.add_dimension(**dimension)
+
+    LambdaContext = namedtuple("LambdaContext", "function_name")
+    lambda_handler({}, LambdaContext("example_fn"))
+    capsys.readouterr().out.strip()
+
+    # THEN ColdStart metric and function_name dimension should be logged
+    # only once
+    lambda_handler({}, LambdaContext("example_fn"))
+
+    output = json.loads(capsys.readouterr().out.strip())
+
+    assert "ColdStart" not in output
+
+    assert "function_name" not in output