@@ -14,13 +14,8 @@
 #include "stdafx.h"
 
 #if !defined(_WIN32) || CPPREST_FORCE_PPLX
-
 #include "pplx/pplx.h"
-
-// Disable false alarm code analyze warning
-#if defined(_MSC_VER)
-#pragma warning(disable : 26165 26110)
-#endif
+#include <atomic>
 
 namespace pplx
 {
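
Note: the hunk above swaps pplx's hand-rolled atomic helpers for the standard <atomic> header; the MSVC code-analysis suppressions (C26165/C26110, false positives about the custom lock's acquire/release) go away together with the code that triggered them. A minimal standalone sketch, my illustration rather than part of the patch, of the std::atomic_flag primitive the new lock is built on: test_and_set() atomically sets the flag and returns the value it held before the call.

// Sketch: observable behavior of std::atomic_flag (C++11).
#include <atomic>
#include <cassert>

int main()
{
    std::atomic_flag flag = ATOMIC_FLAG_INIT; // well-defined "clear" state
    assert(!flag.test_and_set());             // was clear: caller "acquires" it
    assert(flag.test_and_set());              // already set: caller must retry
    flag.clear();                             // back to clear ("release")
    assert(!flag.test_and_set());
    return 0;
}
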
@@ -36,24 +31,16 @@ class _Spin_lock
 
     void lock()
     {
-        if (details::atomic_compare_exchange(_M_lock, 1l, 0l) != 0l)
+        while (_M_lock.test_and_set())
         {
-            do
-            {
-                pplx::details::platform::YieldExecution();
-
-            } while (details::atomic_compare_exchange(_M_lock, 1l, 0l) != 0l);
+            pplx::details::platform::YieldExecution();
         }
     }
 
-    void unlock()
-    {
-        // fence for release semantics
-        details::atomic_exchange(_M_lock, 0l);
-    }
+    void unlock() { _M_lock.clear(); }
 
 private:
-    atomic_long _M_lock;
+    std::atomic_flag _M_lock;
 };
 
 typedef ::pplx::scoped_lock<_Spin_lock> _Scoped_spin_lock;
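
Note: the lock now spins on test_and_set(), whose default sequentially consistent ordering subsumes the acquire semantics a lock needs, and unlock() becomes a one-line clear(); std::atomic_flag is also the only atomic type the standard guarantees to be lock-free, which makes it a safe foundation for a lock. Below is a self-contained sketch of the same shape, assuming nothing beyond the standard library: it uses std::this_thread::yield() where the patch calls pplx's platform::YieldExecution(), and spells out the minimal acquire/release orders where the patch keeps the seq_cst defaults.

#include <atomic>
#include <mutex>
#include <thread>

class spin_lock
{
public:
    void lock()
    {
        // test_and_set() returns the previous value: loop until we observe
        // "was clear", i.e. this thread is the one that set it.
        while (m_flag.test_and_set(std::memory_order_acquire))
        {
            std::this_thread::yield(); // back off while contended
        }
    }

    void unlock()
    {
        // Release order so writes made under the lock are visible to the next owner.
        m_flag.clear(std::memory_order_release);
    }

private:
    // Pre-C++20, a default-constructed atomic_flag has unspecified state,
    // so the explicit initializer matters in portable code.
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

// spin_lock satisfies BasicLockable, so the standard guards work with it:
void example(spin_lock& sl)
{
    std::lock_guard<spin_lock> guard(sl);
    // ...critical section...
}
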
@@ -63,59 +50,65 @@ static struct _pplx_g_sched_t
 {
     typedef std::shared_ptr<pplx::scheduler_interface> sched_ptr;
 
-    _pplx_g_sched_t() { m_state = post_ctor; }
+    _pplx_g_sched_t() { m_state.store(post_ctor, std::memory_order_relaxed); }
 
-    ~_pplx_g_sched_t() { m_state = post_dtor; }
+    ~_pplx_g_sched_t() { m_state.store(post_dtor, std::memory_order_relaxed); }
 
     sched_ptr get_scheduler()
     {
-        switch (m_state)
+        sched_ptr result;
+        switch (m_state.load(std::memory_order_relaxed))
         {
             case post_ctor:
                 // This is the 99.9% case.
-
-                if (!m_scheduler)
                 {
                     ::pplx::details::_Scoped_spin_lock lock(m_spinlock);
                     if (!m_scheduler)
                     {
                         m_scheduler = std::make_shared<::pplx::default_scheduler_t>();
                     }
-                }
 
-                return m_scheduler;
+                    result = m_scheduler;
+                } // unlock
+
+                break;
             default:
                 // This case means the global m_scheduler is not available.
                 // We spin off an individual scheduler instead.
-                return std::make_shared<::pplx::default_scheduler_t>();
+                result = std::make_shared<::pplx::default_scheduler_t>();
+                break;
         }
+
+        return result;
     }
 
     void set_scheduler(sched_ptr scheduler)
     {
-        if (m_state == pre_ctor || m_state == post_dtor)
+        const auto localState = m_state.load(std::memory_order_relaxed);
+        if (localState == pre_ctor || localState == post_dtor)
         {
             throw invalid_operation("Scheduler cannot be initialized now");
         }
 
         ::pplx::details::_Scoped_spin_lock lock(m_spinlock);
 
-        if (m_scheduler != nullptr)
+        if (m_scheduler)
         {
             throw invalid_operation("Scheduler is already initialized");
         }
 
         m_scheduler = std::move(scheduler);
     }
 
-    enum
+    enum m_state_values
     {
-        pre_ctor = 0,
-        post_ctor = 1,
-        post_dtor = 2
-    } m_state;
+        pre_ctor,
+        post_ctor,
+        post_dtor
+    };
 
 private:
+    std::atomic<m_state_values> m_state;
     pplx::details::_Spin_lock m_spinlock;
     sched_ptr m_scheduler;
 } _pplx_g_sched;
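
Note: m_state becomes a std::atomic<m_state_values> accessed with relaxed ordering throughout, which is sufficient because it only records which phase of the global's lifetime we are in (zero-initialized pre_ctor before the constructor runs, post_dtor after static destruction); all synchronization for m_scheduler itself still comes from the spin lock. The unsynchronized outer if (!m_scheduler) pre-check is also gone: the common path now always takes the lock, trading a little contention for a data-race-free read. A reduced sketch of the same lifetime-gated lazy-init pattern follows; Widget, widget_holder and g_widget are hypothetical names for illustration, not pplx API, and a std::mutex stands in for the spin lock.

#include <atomic>
#include <memory>
#include <mutex>

struct Widget {};

static struct widget_holder
{
    enum state_t { pre_ctor, post_ctor, post_dtor }; // pre_ctor == 0 matters, see below

    widget_holder() { m_state.store(post_ctor, std::memory_order_relaxed); }
    ~widget_holder() { m_state.store(post_dtor, std::memory_order_relaxed); }

    std::shared_ptr<Widget> get()
    {
        std::shared_ptr<Widget> result;
        if (m_state.load(std::memory_order_relaxed) == post_ctor)
        {
            // Normal case: lazily create the shared instance under the lock.
            std::lock_guard<std::mutex> lock(m_mutex);
            if (!m_instance)
            {
                m_instance = std::make_shared<Widget>();
            }
            result = m_instance;
        }
        else
        {
            // Called before construction or after destruction of the global
            // (static init/shutdown order): hand out a private instance.
            result = std::make_shared<Widget>();
        }
        return result;
    }

    std::atomic<state_t> m_state;
    std::mutex m_mutex;
    std::shared_ptr<Widget> m_instance;
} g_widget;

Because g_widget has static storage duration, it is zero-initialized before any dynamic initialization runs, so a caller that races ahead of its constructor reads pre_ctor (value 0) and falls into the private-instance branch; that is the property the pplx enum relies on as well.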