朝暮的闲暇时刻


  • 首页

  • 归档

重温数据结构之树

发表于 2018-05-03 | 分类于 数据结构

0x01 树的定义

  树是一种非线性的数据结构。树是由一个或多个结点组成的有限集合,其中必有一个称为根(Root)的结点;剩下的结点被分成n>=0个互不相交的集合T1、T2…Tn,而且这些集合的每一个又都是树。树T1、T2…Tn称为根的子树。
  树至少有一个结点(Root)。
  树的度:所有结点中度的最大值。结点的度为结点拥有的子树数,度为0的结点称为叶结点,不为0的结点称为分支结点。
  树的深度:组成该树各结点的最大层次。
  有序树:树中结点的各子树从左向右是有次序的,子树间不能互换位置;否则为无序树。

0x02 二叉树的定义

  每个结点至多只有两棵子树,度不能大于2,并且子树有左右之分,左右次序不能改变,这样的树称之为二叉树。
  二叉树的五种基本形态:

  • 空二叉树
  • 只有一个根结点的二叉树
  • 只有左子树
  • 只有右子树
  • 左、右子树均非空的二叉树
      一棵深度为k且有2^k−1个结点的二叉树称为满二叉树;对二叉树的结点进行连续编号,约定编号从根结点起,自上而下,自左而右,深度为k,有n个结点的二叉树,当且仅当其每一个结点都与深度k的满二叉树中编号从1到n的结点一一对应时,称之为完全二叉树。
    完全二叉树示意图
      二叉树的第i层上至多有2^(i−1)个结点;深度为k的二叉树至多有2^k−1个结点;具有n个结点的完全二叉树的深度为⌊log₂n⌋+1。
      具体示例代码如下,其中插入的时候用到了路线,即如果走到插入的位置,比如往左走即0,往右走即1
    插入节点路线示意图
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    95
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    176
    177
    178
    179
    180
    181
    182
    183
    184
    185
    186
    187
    188
    189
    190
    191
    192
    193
    194
    195
    196
    197
    198
    199
    200
    201
    202
    203
    204
    205
    206
    207
    208
    209
    210
    211
    212
    213
    214
    215
    216
    217
    218
    219
    220
    221
    222
    223
    224
    225
    226
    227
    228
    229
    230
    231
    232
    #define BT_LEFT 0
    #define BT_RIGHT 1

    #define SUCCESS 1000
    #define ERROR -1000
    #define STATUS int

    typedef unsigned long long BTPos;

    typedef struct _bTreeNode BTreeNode;
    struct _bTreeNode {
    BTreeNode *left;
    BTreeNode *right;
    BTreeNode *next;
    int value;
    };

    typedef struct _bTree BTree;
    struct _bTree {
    int count;
    BTreeNode *root;
    };

    /* Allocate and zero-initialize an empty tree handle.
     * Returns NULL when the allocation fails. */
    BTree* BTree_Create() {
        BTree *tree = (BTree *)malloc(sizeof(BTree));
        if (tree == NULL) {
            return NULL;
        }
        tree->count = 0;
        tree->root = NULL;
        return tree;
    }

    // 创建树
    void BTree_Destroy(BTree *tree) {
    if(tree == NULL) return;

    free(tree);
    }

    void BTree_Clear(BTree *tree) {
    if(tree == NULL) return;

    tree->count = 0;
    tree->root = NULL;
    }

    /* Total number of nodes in the subtree rooted at `node` (0 if empty). */
    int Recursive_Count(BTreeNode *node) {
        if (node == NULL) {
            return 0;
        }
        int leftTotal = Recursive_Count(node->left);
        int rightTotal = Recursive_Count(node->right);
        return leftTotal + 1 + rightTotal;
    }

    /* Height of the subtree rooted at `node`; an empty subtree has height 0. */
    int Recursive_Height(BTreeNode *node) {
        if (node == NULL) {
            return 0;
        }
        int leftHeight = Recursive_Height(node->left);
        int rightHeight = Recursive_Height(node->right);
        int taller = (leftHeight > rightHeight) ? leftHeight : rightHeight;
        return taller + 1;
    }

    // Degree of the subtree rooted at `node`: the maximum number of children
    // (0, 1 or 2) that any node in it has. Returns 0 for an empty subtree.
    int Recursive_Degree(BTreeNode *node) {
    if(node == NULL) return 0;

    int num = 0;

    if(node->left != NULL) num++;
    if(node->right != NULL) num++;

    if(num == 1) { // 2 is the maximum for a binary tree, so only recurse while the answer could still grow
    int ld = Recursive_Degree(node->left);
    int rd = Recursive_Degree(node->right);

    if(num < ld) {
    num = ld;
    }

    if(num < rd) {
    num = rd;
    }
    }

    return num;
    }
    // Insert `node` into `tree` at the position described by the bit route
    // `pos`: starting at the root, each low bit picks a child (0 = left,
    // 1 = right) for `count` steps. `flag` chooses on which side of `node`
    // the subtree previously at that position is re-attached.
    // Returns SUCCESS, or ERROR on bad arguments.
    STATUS BTree_Insert(BTree *tree, BTreeNode *node, BTPos pos, int count, int flag) {
    if(tree == NULL || node == NULL || (flag != BT_LEFT && flag != BT_RIGHT)) return ERROR;

    int bit = 0; // direction taken on the most recent step (0 = left, 1 = right)
    BTreeNode *parent = NULL;
    BTreeNode *current = tree->root;

    node->left = NULL;
    node->right = NULL;

    // Walk the route; `parent` trails one step behind `current`.
    // NOTE(review): if the route runs past a NULL child the walk stops early
    // and the node attaches at that shallower spot — confirm this is intended.
    while (count > 0 && current != NULL) {
    bit = pos & 1;
    pos = pos >> 1;

    parent = current;

    if (bit == BT_LEFT) {
    current = current->left;
    }else if (bit == BT_RIGHT) {
    current = current->right;
    }
    count--;
    }

    // Re-attach the displaced subtree (possibly NULL) on the requested side.
    if(flag == BT_LEFT) {
    node->left = current;
    }else if (flag == BT_RIGHT) {
    node->right = current;
    }

    if(parent == NULL) { // route of length 0: the new node becomes the root
    tree->root = node;
    }else{
    if(bit == BT_LEFT) {
    parent->left = node;
    }else if (bit == BT_RIGHT) {
    parent->right = node;
    }
    }
    tree->count++;

    return SUCCESS;
    }

    // Detach the subtree at the route described by `pos`/`count` (low bit
    // first, 0 = left, 1 = right). The detached nodes are NOT freed here —
    // node memory belongs to the caller. The tree's node count is reduced
    // by the size of the removed subtree.
    STATUS BTree_Delete(BTree *tree, BTPos pos, int count) {
    if(tree == NULL) return ERROR;

    int bit = 0;

    BTreeNode *parent = NULL;
    BTreeNode *current = tree->root;

    // Walk the route; `parent` trails one step behind `current`.
    while (count > 0 && current != NULL) {
    bit = pos & 1;
    pos = pos >> 1;

    parent = current;

    if (bit == BT_LEFT) {
    current = current->left;
    }else if (bit == BT_RIGHT) {
    current = current->right;
    }
    count--;
    }

    if (parent == NULL) { // empty route: the whole tree is detached
    tree->root = NULL;
    }else{
    if(bit == BT_LEFT){
    parent->left = NULL;
    }else if (bit == BT_RIGHT) {
    parent->right = NULL;
    }
    }

    // Subtract the size of the subtree that was just detached.
    tree->count -= Recursive_Count(current);

    return SUCCESS;
    }

    /* Walk `count` steps from the root, taking the low bit of `pos` each step
     * (0 = left, 1 = right). Returns the node reached, or NULL if the route
     * falls off the tree or `tree` is NULL. */
    BTreeNode* BTree_Get(BTree *tree, BTPos pos, int count) {
        if (tree == NULL) return NULL;

        BTreeNode *current = tree->root;
        int steps = count;
        BTPos route = pos;

        while (steps > 0 && current != NULL) {
            int direction = route & 1;
            route >>= 1;
            current = (direction == BT_LEFT) ? current->left : current->right;
            steps--;
        }

        return current;
    }

    // Root accessor. NULL-guard added for consistency with BTree_Height /
    // BTree_Count / BTree_Degree, which all tolerate a NULL tree.
    BTreeNode* BTree_Root(BTree *tree) {
        if (tree == NULL) return NULL;
        return tree->root;
    }

    int BTree_Height(BTree *tree) {
    if(tree == NULL) return 0;

    return Recursive_Height(tree->root);
    }

    int BTree_Count(BTree *tree) {
    if(tree == NULL) return 0;

    return tree->count;
    }

    int BTree_Degree(BTree *tree) {
    if(tree == NULL) return 0;

    return Recursive_Degree(tree->root);
    }

    // Demo: build the article's 5-node tree by inserting nodes along bit routes.
    - (void)viewDidLoad {
    [super viewDidLoad];

    BTree *tree = BTree_Create();

    // Initializer order matches struct _bTreeNode: {left, right, next, value}.
    struct _bTreeNode n1 = {NULL, NULL, NULL, 11};
    struct _bTreeNode n2 = {NULL, NULL, NULL, 25};
    struct _bTreeNode n3 = {NULL, NULL, NULL, 73};
    struct _bTreeNode n4 = {NULL, NULL, NULL, 38};
    struct _bTreeNode n5 = {NULL, NULL, NULL, 97};

    // Routes are consumed low bit first: 0 = left, 1 = right.
    // NOTE(review): pos values are decimal literals (10, not binary 0b10) —
    // verify each route matches the article's diagram.
    BTree_Insert(tree, &n1, 0, 0, BT_LEFT);
    BTree_Insert(tree, &n2, 0, 1, BT_LEFT);
    BTree_Insert(tree, &n3, 1, 1, BT_LEFT);
    BTree_Insert(tree, &n4, 00, 2, BT_LEFT);
    BTree_Insert(tree, &n5, 10, 2, BT_LEFT);

    // Nodes are stack-allocated; BTree_Destroy frees only the BTree struct.
    BTree_Destroy(tree);
    }

0x03 前序、中序、后序和层次遍历

  前序遍历:又称深度优先遍历,首先访问根节点数据,然后访问左子树,最后访问右子树。
前序遍历示意图
  中序遍历:首先访问左子树,然后访问根节点数据,最后访问右子树。
中序遍历示意图
  后序遍历:首先访问左子树,然后访问右子树,最后访问根节点数据。
后序遍历示意图
  层次遍历:又称广度优先遍历,一层一层访问节点数据。
层次遍历示意图

  代码实现:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
// Pre-order traversal, recursive: visit root, then left subtree, then right.
void Tree_PrePrint(BTreeNode *node) {
    // BUG FIX: without this guard the original recursed into NULL children
    // and dereferenced node->value on NULL at every leaf.
    if (node == NULL) return;

    printf("%d", node->value);
    Tree_PrePrint(node->left);
    Tree_PrePrint(node->right);
}

// Pre-order traversal with an explicit stack: push the right child first and
// the left child second, so the left subtree sits on top of the stack and is
// visited before the right one.
// NOTE(review): this redefines Tree_PrePrint, which already exists above —
// one of the two needs a distinct name for the file to compile.
void Tree_PrePrint(BTreeNode *node) {
if(node == NULL) return;

StackList *stack = Stack_Create();
Stack_Push(stack, node);
while (!Stack_Empty(stack)) {
BTreeNode *node = Stack_Pop(stack);
printf("%d", node->value);
if(node->right) Stack_Push(stack, node->right);
if(node->left) Stack_Push(stack, node->left);
}

Stack_Destroy(stack);
}

// In-order traversal: left subtree, then root, then right subtree.
void Tree_MiddlePrint(BTreeNode *node) {
    // BUG FIX: guard added; the original dereferenced NULL at the leaves.
    if (node == NULL) return;

    Tree_MiddlePrint(node->left);
    printf("%d", node->value);
    Tree_MiddlePrint(node->right);
}

// Post-order traversal: left subtree, then right subtree, then root.
void Tree_LastPrint(BTreeNode *node) {
    // BUG FIX: guard added; the original dereferenced NULL at the leaves.
    if (node == NULL) return;

    Tree_LastPrint(node->left);
    Tree_LastPrint(node->right);
    printf("%d", node->value);
}

// Level-order (breadth-first) traversal: visit nodes level by level using a
// FIFO queue; children are enqueued left before right.
void Tree_LevelPrint(BTreeNode *node) {
if(node == NULL) return;

QueueList *queue = Queue_Create();
Queue_Push(queue, node);
while (!Queue_Empty(queue)) {
BTreeNode *node = Queue_Pop(queue);
printf("%d", node->value);
if(node->left) Queue_Push(queue, node->left);
if(node->right) Queue_Push(queue, node->right);
}

Queue_Destroy(queue);
}

0x04 线索化二叉树

  前序遍历每个节点,并把访问的节点以链表的形式保存。详细代码不再复述。

0x05 二叉排序树

  之前一篇聊过二分查找。但当需要插入或者删除数据元素时,为了能够继续进行二分查找,需要大规模挪动有序表 中的数据元素,使得插入或者删除后的线性表保持有序。可以直接组织一棵具有二分查找特性的二叉树。

  • 二分查找过程即变换为对树结点的查找过程;

  • 由二分查找的特性可知树结点查找的时间复杂度为 O(log₂n);

  • 只在叶结点处插入新结点即可保持特性不变;

  • 删除树结点时也可以容易的保持特性不变。
      二叉排序树可以是一棵空树,若它的左子树不空,那么左子树上所有结点的值均小于它的根结点的值;若它的右子树不空,那么右子树上所有结点的值均大于它的根结点的值。而且它的左右子树也分别是二叉排序树。
      插入操作,比结点的值大,就往左边走;比结点的值小,就往右边走。
      删除操作,如果是叶结点就直接删除;如果不是叶结点,有一个孩子的结点就用孩子结点替代原结点,如果大于1个孩子结点,那就用中序遍历下的直接前驱替换原结点。

    删除二叉排序树示意图

      代码实现:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    95
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    176
    177
    178
    179
    180
    181
    182
    183
    184
    185
    186
    187
    188
    189
    190
    191
    192
    193
    194
    195
    196
    197
    198
    199
    200
    201
    202
    203
    204
    205
    206
    207
    208
    209
    210
    211
    212
    213
    214
    215
    216
    217
    218
    #define BT_LEFT 0
    #define BT_RIGHT 1

    #define SUCCESS 1000
    #define ERROR -1000
    #define STATUS int

    typedef void Key;
    typedef int (TreeNodeCompare)(Key*, Key*);

    typedef struct _treeNode TreeNode;
    struct _treeNode
    {
    Key* key;
    TreeNode *left;
    TreeNode *right;
    };

    typedef struct _tree Tree;
    struct _tree {
    int count;
    TreeNode *root;
    };

    int Recursive_Count(TreeNode* node) {
    if(node == NULL) return 0;

    return Recursive_Count(node->left) + 1 + Recursive_Count(node->right);
    }

    int Recursive_Height(TreeNode* node) {
    if(node == NULL) return 0;

    int lh = Recursive_Height(node->left);
    int rh = Recursive_Height(node->right);
    return ((lh > rh) ? lh : rh) + 1;
    }

    int Recursive_Degree(TreeNode* node) {
    if(node == NULL) return 0;

    int degree = 0;

    if(node->left != NULL) degree++;
    if(node->right != NULL) degree++;

    if(degree == 1) {
    int ld = Recursive_Degree(node->left);
    int rd = Recursive_Degree(node->right);

    if(degree < ld) degree = ld;
    if(degree < rd) degree = rd;
    }

    return degree;
    }

    STATUS Recursive_Insert(TreeNode *node, TreeNode *waitNode, TreeNodeCompare *compare) {
    if(node == NULL || waitNode == NULL) return ERROR;

    int ret = compare(node->key, waitNode->key);
    if(ret == 0) return ERROR;

    if(ret < 0) {
    if(node->left != NULL) {
    ret = Recursive_Insert(node->left, waitNode, compare);
    }else{
    node->left = waitNode;
    }
    }else{
    if(node->right != NULL) {
    ret = Recursive_Insert(node->right, waitNode, compare);
    }else{
    node->right = waitNode;
    }
    }

    return ret != 0 ? SUCCESS : ERROR;
    }

    TreeNode* Recursive_Get(TreeNode *node, Key *key, TreeNodeCompare *compare) {
    if(node == NULL || key == NULL) return NULL;

    TreeNode *gNode = NULL;

    int ret = compare(node->key, key);
    if(ret == 0) {
    gNode = node;
    }else if (ret < 0) {
    gNode = Recursive_Get(node->left, key, compare);
    }else{
    gNode = Recursive_Get(node->right, key, compare);
    }

    return gNode;
    }

    // Unlink the node at *pNode from a binary search tree and return it.
    // When both children exist, the in-order predecessor (the rightmost node
    // of the left subtree) is spliced into *pNode's place.
    TreeNode* Recursive_Delete_Node(TreeNode **pNode) {
    TreeNode *node = *pNode;
    // No left child: the right subtree takes the deleted node's place.
    if((*pNode)->left == NULL) {
    *pNode = (*pNode)->right;
    // No right child: the left subtree takes the deleted node's place.
    }else if((*pNode)->right == NULL) {
    *pNode = (*pNode)->left;
    // Both children present.
    }else{
    TreeNode *g = *pNode;
    TreeNode *c = (*pNode)->left; // will be moved into the deleted node's slot
    // Walk to the rightmost node of the left subtree (the in-order
    // predecessor); that node replaces the one being deleted.
    while (c->right != NULL) {
    g = c;
    c = c->right;
    }

    if(g != *pNode) {
    // c has no right child; hand c's left subtree to its old parent g
    // before moving c up.
    g->right = c->left;
    }else{
    // c is the direct left child: detach c by linking g straight to c's left.
    g->left = c->left;
    }
    // Graft the deleted node's children onto the replacement.
    c->left = (*pNode)->left;
    c->right = (*pNode)->right;

    *pNode = c;
    }

    return node;
    }

    // Find the node whose key matches `key` in the subtree at *pNode, unlink
    // it (via Recursive_Delete_Node) and return it; NULL if not found.
    TreeNode* Recursive_Delete(TreeNode **pNode, Key *key, TreeNodeCompare *compare) {
        if (pNode == NULL || *pNode == NULL || key == NULL) return NULL;

        TreeNode *node = NULL;

        // BUG FIX: the original called compare(key, node->key) while `node`
        // was still NULL (guaranteed crash), and the swapped argument order
        // was the reverse of Recursive_Get, which would steer the descent the
        // wrong way. Compare the current subtree root's key exactly as
        // Recursive_Get does.
        int ret = compare((*pNode)->key, key);
        if (ret == 0) {
            node = Recursive_Delete_Node(pNode);
        } else if (ret < 0) {
            node = Recursive_Delete(&((*pNode)->left), key, compare);
        } else {
            node = Recursive_Delete(&((*pNode)->right), key, compare);
        }

        return node;
    }

    Tree* Tree_Create() {
    Tree *tree = (Tree *)malloc(sizeof(Tree));

    if(tree != NULL) {
    tree->root = NULL;
    tree->count = 0;
    }

    return tree;
    }

    void Tree_Destroy(Tree *tree) {
    free(tree);
    }

    STATUS Tree_Insert(Tree *tree, TreeNode *node, TreeNodeCompare *compare) {
    if(tree == NULL || node == NULL) return ERROR;

    int ret = SUCCESS;

    node->left = NULL;
    node->right = NULL;

    if(tree->root == NULL) {
    tree->root = node;
    }else{
    ret = Recursive_Insert(tree->root, node, compare);
    }

    if(ret == SUCCESS) tree->count++;

    return ret;
    }

    TreeNode* Tree_Delete(Tree *tree, Key *key, TreeNodeCompare *compare) {
    if(tree == NULL || key == NULL) return NULL;

    TreeNode *node = NULL;

    node = Recursive_Delete(&tree->root, key, compare);

    return node;
    }

    TreeNode* Tree_Get(Tree *tree, Key *key, TreeNodeCompare *compare) {
    if(tree == NULL || key == NULL) return NULL;

    TreeNode *node = NULL;

    node = Recursive_Get(tree->root, key, compare);

    return node;
    }

    int Tree_Count(Tree *tree) {
    return Recursive_Count(tree->root);
    }

    int Tree_Height(Tree *tree) {
    return Recursive_Height(tree->root);
    }

    int Tree_Degree(Tree *tree) {
    return Recursive_Degree(tree->root);
    }

    // Compare two keys that are integers smuggled through void* values.
    // BUG FIX: the original `(int)k1 - (int)k2` truncates the pointer on
    // 64-bit (LP64) platforms and the subtraction can overflow. Callers only
    // inspect the sign / zero, so return a sign comparison instead.
    int Compare_Key(Key* k1, Key* k2) {
        long long a = (long long)k1;
        long long b = (long long)k2;
        return (a > b) - (a < b);
    }

  二叉排序树时间复杂度为O(logn)。虽然二叉排序树解决了既有数组的取值高效,又有链表增删的高效,但当所有节点只有左节点(如果插入节点集本身是大到小排列);或所有节点只有右节点(如果插入节点集本身是小到大排列)。在这种情况 下,排序二叉树就变成了普通链表,其检索效率就会很差。需要引出AVL树和红黑树这两种平衡二叉树了。

0x06 树状打印二叉树

   对于打印二叉树,我们可以按照层次打印来打印顺序,换行的话我们可以比较当前结点的位置是否一致来判断是不是要换行。如果要获取结点的位置,我们需要改动点树的结构,结点新加一个成员变量parent,表示父结点。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
#define SUCCESS 1000
#define ERROR -1000
#define STATUS int

typedef void Key;
typedef int (TreeNodeCompare)(Key*, Key*);

typedef struct _treeNode TreeNode;
struct _treeNode
{
Key* key;
TreeNode *left;
TreeNode *right;
TreeNode *parent;
TreeNode *next;
};

typedef struct _tree Tree;
struct _tree {
int count;
TreeNode *root;
};

int Recursive_Count(TreeNode* node) {
if(node == NULL) return 0;

return Recursive_Count(node->left) + 1 + Recursive_Count(node->right);
}

int Recursive_Height(TreeNode* node) {
if(node == NULL) return 0;

int lh = Recursive_Height(node->left);
int rh = Recursive_Height(node->right);
return ((lh > rh) ? lh : rh) + 1;
}

int Recursive_Degree(TreeNode* node) {
if(node == NULL) return 0;

int degree = 0;

if(node->left != NULL) degree++;
if(node->right != NULL) degree++;

if(degree == 1) {
int ld = Recursive_Degree(node->left);
int rd = Recursive_Degree(node->right);

if(degree < ld) degree = ld;
if(degree < rd) degree = rd;
}

return degree;
}

STATUS Recursive_Insert(TreeNode *node, TreeNode *waitNode, TreeNodeCompare *compare) {
if(node == NULL || waitNode == NULL) return ERROR;

int ret = compare(node->key, waitNode->key);
if(ret == 0) return ERROR;

if(ret < 0) {
if(node->left != NULL) {
ret = Recursive_Insert(node->left, waitNode, compare);
}else{
waitNode->parent = node;
node->left = waitNode;
}
}else{
if(node->right != NULL) {
ret = Recursive_Insert(node->right, waitNode, compare);
}else{
waitNode->parent = node;
node->right = waitNode;
}
}

return ret != 0 ? SUCCESS : ERROR;
}

TreeNode* Recursive_Get(TreeNode *node, Key *key, TreeNodeCompare *compare) {
if(node == NULL || key == NULL) return NULL;

TreeNode *gNode = NULL;

int ret = compare(node->key, key);
if(ret == 0) {
gNode = node;
}else if (ret < 0) {
gNode = Recursive_Get(node->left, key, compare);
}else{
gNode = Recursive_Get(node->right, key, compare);
}

return gNode;
}

// Unlink the node at *pNode (parent-aware variant) and return it. With two
// children the in-order predecessor — the rightmost node of the left
// subtree — is spliced into *pNode's place.
TreeNode* Recursive_Delete_Node(TreeNode **pNode) {
    TreeNode *node = *pNode;

    if ((*pNode)->left == NULL) {
        // BUG FIX: a leaf has right == NULL too; the original dereferenced
        // it unconditionally and crashed when deleting a leaf.
        if ((*pNode)->right != NULL) {
            (*pNode)->right->parent = (*pNode)->parent;
        }
        *pNode = (*pNode)->right;
    } else if ((*pNode)->right == NULL) {
        (*pNode)->left->parent = (*pNode)->parent;
        *pNode = (*pNode)->left;
    } else {
        TreeNode *g = *pNode;
        TreeNode *c = (*pNode)->left;
        // Find the in-order predecessor: rightmost node of the left subtree.
        while (c->right != NULL) {
            g = c;
            c = c->right;
        }

        if (g != *pNode) {
            // c has no right child; hand its left subtree to its old parent g.
            g->right = c->left;
            // BUG FIX: keep parent pointers consistent for moved subtrees.
            if (c->left != NULL) c->left->parent = g;
            c->left = (*pNode)->left;
            c->left->parent = c;
        }
        // else: c is the direct left child and simply keeps its own left
        // subtree (the original's g->left = c->left; c->left = (*pNode)->left
        // sequence was a net no-op in that case).

        // Graft the deleted node's right subtree (non-NULL in this branch).
        c->right = (*pNode)->right;
        c->right->parent = c;
        c->parent = (*pNode)->parent;

        *pNode = c;
    }

    return node;
}

// Find the node matching `key` in the subtree at *pNode, unlink it and
// return it; NULL if not found.
TreeNode* Recursive_Delete(TreeNode **pNode, Key *key, TreeNodeCompare *compare) {
    if (pNode == NULL || *pNode == NULL || key == NULL) return NULL;

    TreeNode *node = NULL;

    // BUG FIX: the original compared against node->key while `node` was
    // still NULL (crash), and its argument order was the reverse of
    // Recursive_Get. Compare the current root's key the same way Get does.
    int ret = compare((*pNode)->key, key);
    if (ret == 0) {
        node = Recursive_Delete_Node(pNode);
    } else if (ret < 0) {
        node = Recursive_Delete(&((*pNode)->left), key, compare);
    } else {
        node = Recursive_Delete(&((*pNode)->right), key, compare);
    }

    return node;
}

Tree* Tree_Create() {
Tree *tree = (Tree *)malloc(sizeof(Tree));

if(tree != NULL) {
tree->root = NULL;
tree->count = 0;
}

return tree;
}

void Tree_Destroy(Tree *tree) {
free(tree);
}

STATUS Tree_Insert(Tree *tree, TreeNode *node, TreeNodeCompare *compare) {
if(tree == NULL || node == NULL) return ERROR;

int ret = SUCCESS;

node->left = NULL;
node->right = NULL;

if(tree->root == NULL) {
tree->root = node;
node->parent = NULL;
}else{
ret = Recursive_Insert(tree->root, node, compare);
}

if(ret == SUCCESS) tree->count++;

return ret;
}

TreeNode* Tree_Delete(Tree *tree, Key *key, TreeNodeCompare *compare) {
if(tree == NULL || key == NULL) return NULL;

TreeNode *node = NULL;

node = Recursive_Delete(&tree->root, key, compare);

return node;
}

TreeNode* Tree_Get(Tree *tree, Key *key, TreeNodeCompare *compare) {
if(tree == NULL || key == NULL) return NULL;

TreeNode *node = NULL;

node = Recursive_Get(tree->root, key, compare);

return node;
}

int Tree_Count(Tree *tree) {
return Recursive_Count(tree->root);
}

int Tree_Height(Tree *tree) {
return Recursive_Height(tree->root);
}

int Tree_Degree(Tree *tree) {
return Recursive_Degree(tree->root);
}

int Compare_Key(Key* k1, Key* k2) {
return (int)k1 - (int)k2;
}

// Depth of `node` counted from the root (root = 1), computed by walking the
// parent links upward. Returns ERROR for a NULL node.
int Tree_getNodeLevel(TreeNode *node) {
    if (node == NULL) return ERROR;

    int depth = 0;
    for (TreeNode *walker = node; walker != NULL; walker = walker->parent) {
        depth++;
    }
    return depth;
}

//----------------
typedef struct _queue {
int length;
TreeNode *front;
TreeNode *rear;
}QueueList;

QueueList* Queue_Create() {
QueueList *list = (QueueList *)malloc(sizeof(QueueList));
if(list != NULL) {
list->length = 0;
list->front = NULL;
list->rear = NULL;
}
return list;
}

void Queue_Destroy(QueueList *queue) {
free(queue);
}

STATUS Queue_Push(QueueList *list, TreeNode *node) {
bool ret = list != NULL && node != NULL;
if(!ret) return ERROR;

if(list->length == 0) {
list->front = node;
list->rear = node;
}else{
list->rear->next = node;
list->rear = node;
}
list->length++;

return SUCCESS;
}

TreeNode* Queue_Pop(QueueList *list) {
if(list == NULL || list->length == 0) return NULL;

TreeNode *node = list->front;
list->front = node->next;
list->length--;

if(list->length == 0) {
list->front = NULL;
list->rear = NULL;
}

return node;
}

bool Queue_Empty(QueueList *list) {
return list->length == 0;
}

// Print the subtree at `node` as a left-to-right, level-by-level picture.
// `treeHeight` is the height of the whole tree, used to size the padding so
// children line up under their parent.
void Node_Print(TreeNode *node, int treeHeight) {
    if (node == NULL) return;

    int lastNodeLine = Tree_getNodeLevel(node); // level currently being printed

    QueueList *queue = Queue_Create();
    Queue_Push(queue, node);
    while (!Queue_Empty(queue)) {
        TreeNode *current = Queue_Pop(queue);

        // Start a new output line whenever the BFS crosses into a deeper level.
        int nodeLine = Tree_getNodeLevel(current);
        if (nodeLine != lastNodeLine) {
            lastNodeLine = nodeLine;
            printf("\n");
        }
        // Padding halves with each level: 2^(height - level + 1) - 1 spaces.
        int step = pow(2, treeHeight - nodeLine + 1) - 1;
        for (int i = 0; i < step; i++) {
            printf(" ");
        }
        printf("%d", (int)current->key);
        for (int i = 0; i < step; i++) {
            printf(" ");
        }
        if (current->left) Queue_Push(queue, current->left);
        if (current->right) Queue_Push(queue, current->right);
    }
    // BUG FIX: the original had a second drain loop here, but the queue is
    // already empty when the first loop exits — dead code, removed.

    Queue_Destroy(queue);
}

void Tree_Print(Tree *tree) {
if(tree == NULL) return;

Node_Print(tree->root, Tree_Height(tree));
}

// 测试代码如下:
Tree *tree = Tree_Create();

struct _treeNode n1 = {(Key *)21, NULL, NULL};
struct _treeNode n2 = {(Key *)62, NULL, NULL};
struct _treeNode n3 = {(Key *)8, NULL, NULL};
struct _treeNode n4 = {(Key *)39, NULL, NULL};
struct _treeNode n5 = {(Key *)12, NULL, NULL};
struct _treeNode n6 = {(Key *)24, NULL, NULL};
struct _treeNode n7 = {(Key *)95, NULL, NULL};
struct _treeNode n8 = {(Key *)47, NULL, NULL};

Tree_Insert(tree, &n1, Compare_Key);
Tree_Insert(tree, &n2, Compare_Key);
Tree_Insert(tree, &n3, Compare_Key);
Tree_Insert(tree, &n4, Compare_Key);
Tree_Insert(tree, &n5, Compare_Key);
Tree_Insert(tree, &n6, Compare_Key);
Tree_Insert(tree, &n7, Compare_Key);
Tree_Insert(tree, &n8, Compare_Key);

Tree_Print(tree);

Tree_Destroy(tree);

// 打印效果如下:
21
62 8
95 39 12
47 24

0x07 哈希表

  开发过程中,常常需要应用到映射关系,即一个key对应一个value,比如iOS中的NSDictionary,事实上数组也是一个简单又高效的哈希表。

  我们使用二叉排序树来构建一个哈希表:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
typedef void HashKey;
typedef void HashValue;
typedef struct _hashNode HashNode;
struct _hashNode {
TreeNode header;
HashValue* value;
};

typedef void Hash;
typedef int (Hash_Compare)(HashKey*, HashKey*);

Hash* Hash_Create() {
return Tree_Create();
}

STATUS Hash_Add(Hash *hash, HashKey *key, HashValue *value, Hash_Compare *compare) {
if(hash == NULL || key == NULL || value == NULL) return ERROR;

HashNode *node = (HashNode *)malloc(sizeof(HashNode));
if(node == NULL) return ERROR;

node->header.key = key;
node->value = value;

STATUS ret = Tree_Insert(hash, (TreeNode *)node, compare);

if(ret == ERROR) free(node);

return ret;
}

HashValue* Hash_Delete(Hash *hash, HashKey *key, Hash_Compare *compare) {
if(hash == NULL || key == NULL) return NULL;

HashNode *node = (HashNode *)Tree_Delete(hash, key, compare);

if(node == NULL) return NULL;

HashValue *value = node->value;
free(node);

return value;
}

// Look up the value stored under `key`; NULL if absent.
HashValue* Hash_Get(Hash *hash, HashKey *key, Hash_Compare *compare) {
    if (hash == NULL || key == NULL) return NULL;

    HashNode *node = (HashNode *)Tree_Get(hash, key, compare);

    if (node == NULL) return NULL;

    // BUG FIX: the original free()d the node here even though Tree_Get leaves
    // it linked in the tree — a use-after-free on any later lookup or delete.
    // Only Hash_Delete owns removal; a lookup must not free.
    return node->value;
}

int Compare_NO(HashKey *k1, HashKey *k2) {
if(k1 == NULL || k2 == NULL) return ERROR;
int a = strcmp((char *)k1, (char *)k2);
return a;
}

// 测试代码:
struct Student {
char *no;
char *name;
int age;
};

Hash *hash = Hash_Create();

struct Student s1 = {"10086", "Objective-C", 22};
struct Student s2 = {"0xabcd", "Swift", 37};
struct Student s3 = {"hello", "Java", 44};
struct Student s4 = {"(*^$%)", "iOS", 12};

struct Student* s = NULL;

Hash_Add(hash, s1.no, &s1, Compare_NO);
Hash_Add(hash, s2.no, &s2, Compare_NO);
Hash_Add(hash, s3.no, &s3, Compare_NO);
Hash_Add(hash, s4.no, &s4, Compare_NO);

s = Hash_Get(hash, "hello", Compare_NO);

printf("%s", s->name);

重温数据结构之排序与查找

发表于 2018-04-25 | 分类于 数据结构

0x01 排序

1. 选择排序

  首先取出第一个值,其位置记做k,然后遍历找到比k位置的值小的值,把比k位置小的值的位置赋值给k,直到第一轮遍历结束,然后交换第一个位置与k位置的值。再取出第二个值,以此类推。
选择排序示意图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
// Log every element of `array` (length `length`), one NSLog line per value.
void printNum(int array[], int length) {
    for (int index = 0; index < length; index++) {
        NSLog(@"数字:%d", array[index]);
    }
}

// Exchange the elements at indices i and j of `array`.
void swap(int array[], int i, int j) {
    int held = array[i];
    array[i] = array[j];
    array[j] = held;
}

// Selection sort, ascending: on each pass, find the minimum of the unsorted
// suffix and exchange it into position i (exchange inlined from swap()).
void selectionSort(int array[], int length) {
    for (int i = 0; i < length; i++) {
        int minIdx = i;
        for (int j = i + 1; j < length; j++) {
            if (array[j] < array[minIdx]) {
                minIdx = j;
            }
        }
        if (minIdx != i) {
            int held = array[i];
            array[i] = array[minIdx];
            array[minIdx] = held;
        }
    }
}

- (void)viewDidLoad {
[super viewDidLoad];

int array[] = {21, 31, 6, 19, 87, 54, 18};
int length = sizeof(array) / sizeof(*array);
selectionSort(array, length);
printNum(array, length);
}

// 运行结果:
2018-04-25 10:17:16.803588+0800 testData[69629:150010320] 数字:6
2018-04-25 10:17:16.803741+0800 testData[69629:150010320] 数字:18
2018-04-25 10:17:16.803833+0800 testData[69629:150010320] 数字:19
2018-04-25 10:17:16.803935+0800 testData[69629:150010320] 数字:21
2018-04-25 10:17:16.804042+0800 testData[69629:150010320] 数字:31
2018-04-25 10:17:16.804150+0800 testData[69629:150010320] 数字:54
2018-04-25 10:17:16.804249+0800 testData[69629:150010320] 数字:87

2. 插入排序

  假设在序号i(i>=1)之前的元素即[0..i-1]都已经排好序,本趟需要找到i对应的元素x的正确位置k,并且在寻找这个位置k的过程中逐个将比较过的元素往后移一位,为元素x“腾位置”,最后将k对应的元素值赋为x。
插入排序示意图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
// Insertion sort, ascending: grow a sorted prefix; shift larger elements one
// slot right to open the hole where array[i] belongs.
void InsertionSort(int array[], int length) {
    for (int i = 1; i < length; i++) {
        int value = array[i];
        int slot = i;
        for (int j = i - 1; j >= 0; j--) {
            if (array[j] > value) {
                array[j + 1] = array[j];
                slot = j;
            } else {
                // The prefix is sorted, so no earlier element can exceed
                // `value`. The original kept scanning to index 0 every pass —
                // wasted O(i) work; break out instead.
                break;
            }
        }
        array[slot] = value;
    }
}

3. 冒泡排序

  从最后开始两两对比,小的话交换位置。
冒泡排序示意图

1
2
3
4
5
6
7
8
9
// Bubble sort, ascending: each pass bubbles the smallest remaining element
// leftward by adjacent exchanges (exchange inlined from swap()).
void BubbleSort(int array[], int length) {
    for (int pass = 0; pass < length; pass++) {
        for (int j = length - 1; j > pass; j--) {
            if (array[j] < array[j - 1]) {
                int held = array[j];
                array[j] = array[j - 1];
                array[j - 1] = held;
            }
        }
    }
}

4. 希尔排序

  希尔排序是把记录按下标的一定增量分组,对每组使用直接插入排序算法排序;随着增量逐渐减少,每组包含的关键词越来越多,当增量减至1时,整个文件恰被分成一组,算法便终止。
  这是一种不稳定的排序方法,平均时间复杂度约为O(n^1.3)(具体取决于所用的增量序列)。
希尔排序示意图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
// Shell sort, ascending: gapped insertion sort with the gap sequence
// gap = gap/3 + 1, finishing with gap == 1 (a plain insertion sort).
void ShellSort(int array[], int length) {
    int gap = length;
    do {
        gap = gap / 3 + 1;
        for (int i = gap; i < length; i++) {
            int value = array[i];
            int slot = i;
            for (int j = i - gap; j >= 0; j -= gap) {
                if (value < array[j]) {
                    // BUG FIX: the original wrote `array[i] = array[j]`, which
                    // only works when a single shift happens; each shifted
                    // element must move one gap to the RIGHT of its own slot.
                    array[j + gap] = array[j];
                    slot = j;
                } else {
                    break; // earlier gapped elements are already <= value
                }
            }
            array[slot] = value;
        }
    } while (gap > 1);
}

5. 快速排序

  任取待排序序列中的某个数据元素(例如:第一个元素)作为基准,左边都是比基准数小的元素,右边都是比基准数大的元素。再对左右的序列进行同样的操作,直到排序正确。
  这个是不稳定的一种排序方法,时间复杂度O(n*logn)。
快速排序示意图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
// Exchange-style partition for quick sort: array[low] is the pivot value.
// On return the pivot sits at the returned index, with elements <= pivot to
// its left and elements >= pivot to its right. Returns -1 on a bad range.
int cut(int array[], int low, int high) {
if(low > high) return -1;

int pv = array[low]; // pivot value
while (low < high) {
// Shrink from the right past elements already >= pivot.
while (low < high && array[high] >=pv) {
high--;
}
swap(array, low, high);
// Shrink from the left past elements already <= pivot.
while (low < high && array[low] <= pv) {
low++;
}
swap(array, low, high);
}
return low; // low == high: the pivot's final position
}

void QSort(int array[], int low, int high) {
if(low >= high) return;

int p = cut(array, low, high);
QSort(array, low, p - 1);
QSort(array, p + 1, high);
}

void QuickSort(int array[], int length) {
QSort(array, 0, length - 1);
}

6. 归并排序

  将两个位置相邻的记录有序子序列归并为一个记录有序的序列。
归并排序示意图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
// Merge the two adjacent sorted runs src[low..mid] and src[mid+1..high]
// into des[low..high], keeping ascending order.
void Merge(int src[], int des[], int low, int mid, int high) {
// mid is the last element of the first run; mid+1 the first of the second.
int i = low; // cursor into the first run
int j = mid + 1; // cursor into the second run (its last element is high)
int k = low; // write cursor into des

// Repeatedly copy the smaller head element until one run is exhausted.
// (On ties the second run's element is written first.)
while (i <= mid && j <= high) {
if (src[i] < src[j]) {
des[k++] = src[i++];
}else{
des[k++] = src[j++];
}
}
// First run not yet exhausted: copy its remainder straight across.
while (i <= mid) {
des[k++] = src[i++];
}
// Second run not yet exhausted: copy its remainder straight across.
while (j <= high) {
des[k++] = src[j++];
}
}

// Recursive merge-sort worker: sorts src[low..high] into des.
// `max` is the full array length, used to size the scratch buffer.
// NOTE(review): a fresh malloc/free happens at every internal node of the
// recursion; a single scratch buffer allocated once in MergeSort would do.
// If malloc fails the halves are silently skipped (free(NULL) is a no-op).
void MSort(int src[], int des[], int low, int high, int max) {
if (low == high) {
des[low] = src[low];
}else{
int mid = (low + high) / 2;
int *space = (int *)malloc(sizeof(int) * max);
if(space != NULL) {
MSort(src, space, low, mid, max);
MSort(src, space, mid+1, high, max);
Merge(space, des, low, mid, high);
}
free(space);
}
}

void MergeSort(int array[], int length) {
MSort(array, array, 0, length - 1, length);
}

0x02 查找

1. 二分查找

  首先将查找表进行排序,取表里中间的值进行比较。如果中间的值就是要查找的就结束查找;如果要查找的值小于中间值,在中间的值的右边进行二分查找;如果要查找的值大于中间值,在中间的值的左边进行二分查找。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
// Recursive binary search over a sorted ascending array.
// Returns the index of `key` within [low, high], or -1 if absent.
int Binary_Search_R(int array[], int low, int high, int key) {
    if (low > high) return -1;

    int index = -1;

    // low + (high - low) / 2 cannot overflow, unlike (low + high) / 2.
    int mid = low + (high - low) / 2;
    if (array[mid] == key) {
        index = mid;
    } else if (array[mid] < key) {
        index = Binary_Search_R(array, mid + 1, high, key);
    } else {
        index = Binary_Search_R(array, low, mid - 1, key);
    }

    return index;
}

// Iterative binary search over a sorted ascending array.
// Returns the index of `key` within [low, high], or -1 if absent.
int Binary_Search(int array[], int low, int high, int key) {
    if (low > high) return -1;

    int index = -1;

    while (low <= high) {
        // low + (high - low) / 2 cannot overflow, unlike (low + high) / 2.
        int mid = low + (high - low) / 2;
        if (array[mid] == key) {
            index = mid;
            break;
        } else if (array[mid] < key) {
            low = mid + 1;
        } else {
            high = mid - 1;
        }
    }

    return index;
}

  但是这样的查找,如果查找的数是第一位,那么显然在1/4处开始查找要比1/2处开始查找效率要高,所以我们需要算出一个动态的中间值,而不是一味的(low + high) / 2这样算中间值。对于(low + high) / 2,等价于low + (high - low) / 2。我们可以设1/2为f(x),得出mid = low + f(x) * (high - low),f(x) = (mid - low) / (high - low)。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
// Interpolation search over a sorted ascending array: estimate the probe
// position from where `key` falls between array[low] and array[high].
// Returns the index of `key`, or -1 if absent.
int Interpolation_Search(int array[], int low, int high, int key) {
    if (low > high) return -1;

    int index = -1;

    while (low <= high) {
        // BUG FIX: a key outside [array[low], array[high]] made fx fall
        // outside [0,1] and the probe index run off the array.
        if (key < array[low] || key > array[high]) break;

        // BUG FIX: equal endpoints divided by zero. Every value in the
        // window equals array[low] here, so check it directly.
        if (array[high] == array[low]) {
            if (array[low] == key) index = low;
            break;
        }

        float fx = 1.0f * (key - array[low]) / (array[high] - array[low]);
        int mid = low + fx * (high - low);

        if (array[mid] == key) {
            index = mid;
            break;
        } else if (array[mid] < key) {
            low = mid + 1;
        } else {
            high = mid - 1;
        }
    }

    return index;
}

  二分查找由于是基于有序的线性表,所以效率为O(logn)。

重温数据结构之栈与队列

发表于 2018-04-24 | 分类于 数据结构

0x01 栈

  栈是允许在同一端进行插入和删除操作的特殊的数据结构。被允许进行插入和删除操作的一端称为栈顶(top),另一端为栈底(bottom)。栈底固定,而栈顶浮动。栈中元素个数为零时称为空栈,插入称为进栈(Push),删除则称为退栈(Pop)。
  栈的一个典型特征就是先进后出。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
#define ERROR   -1000   // status code: operation failed
#define SUCCESS 1000    // status code: operation succeeded

#define STATUS int      // return type of status-returning stack operations

// 节点结构 -- stack node: int payload plus link to the node below it.
// Nodes are caller-owned in this design (the demo pushes stack variables).
typedef struct _node {
int val;
struct _node *next;   // node beneath this one; NULL at the bottom
}Node;

// Stack header: LIFO over a singly linked chain of nodes.
typedef struct _stack {
int length;           // number of nodes currently on the stack
struct _node *top;    // topmost node, or NULL when empty
}StackList;

StackList* Stack_Create() {
StackList *list = (StackList *)malloc(sizeof(StackList));
if(list != NULL) {
list->length = 0;
}
return list;
}

// Frees the stack header only.
// NOTE(review): nodes still on the stack are NOT freed here -- callers own
// node memory in this design; confirm no heap-allocated nodes are leaked.
void Stack_Destroy(StackList *list) {
free(list);
}

// Resets the stack to empty in O(1) without touching the (caller-owned) nodes.
void Stack_Clear(StackList *list) {
list->length = 0;
list->top = NULL;
}

// Returns the number of nodes currently on the stack.
// NOTE(review): `list` is dereferenced unchecked -- a NULL stack crashes here.
int Stack_Length(StackList *list) {
return list->length;
}

// Pushes `node` onto the top of the stack (the node remains caller-owned).
// Returns SUCCESS, or ERROR when either argument is NULL.
STATUS Stack_Push(StackList *list, Node *node) {
    if (list == NULL || node == NULL) {
        return ERROR;
    }

    node->next = list->top;  // new node links down to the previous top
    list->top = node;
    list->length++;

    return SUCCESS;
}

// Pops and returns the topmost node, or NULL when the stack is NULL or empty.
// Ownership of the returned node passes to the caller.
Node* Stack_Pop(StackList *list) {
    // The original only checked `list` and then dereferenced a NULL `top`
    // when popping an empty stack.
    if (list == NULL || list->top == NULL) return NULL;

    Node *currentNode = list->top;
    list->top = currentNode->next;
    list->length--;

    return currentNode;
}

@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad {
[super viewDidLoad];

StackList *list = Stack_Create();

struct _node n1;
struct _node n2;
struct _node n3;
struct _node n4;
struct _node n5;

n1.val = 23;
n2.val = 48;
n3.val = 76;
n4.val = 82;
n5.val = 31;

Stack_Push(list, (Node *)&n1);
Stack_Push(list, (Node *)&n2);
Stack_Push(list, (Node *)&n3);
Stack_Push(list, (Node *)&n4);
Stack_Push(list, (Node *)&n5);

int total = list->length;
for (int i = 0; i < total; i++) {
Node *node = Stack_Pop(list);
NSLog(@"弹出值:%d", node->val);
}

Stack_Destroy(list);
}

@end

// 运行结果:
2018-04-24 14:54:22.451612+0800 testData[70524:143447403] 弹出值:31
2018-04-24 14:54:22.451817+0800 testData[70524:143447403] 弹出值:82
2018-04-24 14:54:22.451924+0800 testData[70524:143447403] 弹出值:76
2018-04-24 14:54:22.452086+0800 testData[70524:143447403] 弹出值:48
2018-04-24 14:54:22.452233+0800 testData[70524:143447403] 弹出值:23

  我们通过一个栈的实际应用继续加深理解,现在我们需要实现编译器中的符号成对检测。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
// Returns true when `c` is an opening pair symbol.
// Quote characters count as openers as well as closers.
bool isLeft(char c)
{
    return c == '<' || c == '(' || c == '[' || c == '{'
        || c == '\'' || c == '\"';
}

// Returns true when `c` is a closing pair symbol.
// Quote characters count as closers as well as openers.
bool isRight(char c)
{
    return c == '>' || c == ')' || c == ']' || c == '}'
        || c == '\'' || c == '\"';
}

// Returns true when `right` is the closing symbol paired with opener `left`.
// Unknown openers never match anything.
bool match(char left, char right) {
    // Parallel tables: opens[i] pairs with closes[i].
    static const char opens[]  = {'<', '(', '[', '{', '\'', '\"'};
    static const char closes[] = {'>', ')', ']', '}', '\'', '\"'};

    for (int i = 0; i < (int)(sizeof(opens) / sizeof(opens[0])); i++) {
        if (left == opens[i]) {
            return right == closes[i];
        }
    }
    return false;
}

// Scans `code` for paired symbols using a stack, logging any mismatch.
// Returns true when every symbol found its partner. (The original
// initialized `ret` to false and never assigned it, so it always
// returned false regardless of the outcome.)
bool scanner(const char *code) {
    bool ret = true;
    int i = 0;

    StackList *list = Stack_Create();
    while (code[i] != '\0') {
        if (isLeft(code[i])) {
            Node *n = (Node *)malloc(sizeof(Node));
            if (n != NULL) {   // original pushed an unchecked malloc result
                n->val = code[i];
                Stack_Push(list, n);
            }
        }

        // Deliberately not `else if`: quotes are both openers and closers,
        // so a quote pushes itself and immediately matches itself.
        if (isRight(code[i])) {
            Node *n = Stack_Pop(list);
            // NULL check must come BEFORE reading n->val — the original read
            // `n->val` first and crashed on an unmatched closing symbol.
            if (n == NULL || !match((char)n->val, code[i])) {
                NSLog(@"%c没有匹配的符号", code[i]);
                ret = false;
            }
            free(n);   // free(NULL) is a no-op
        }

        i++;
    }

    if (Stack_Empty(list) && code[i] == '\0') {
        NSLog(@"匹配完成");
    }else{
        NSLog(@"代码不合法");
        ret = false;
    }

    Stack_Destroy(list);

    return ret;
}

@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad {
[super viewDidLoad];

const char* code1 = "#import<ViewController.h> ";

const char* code2 = "#import<ViewController.h"; // 把>去掉

scanner(code1); // 代码运行: 匹配完成
scanner(code2); // 代码运行: 代码不合法
}

@end

0x02 队列

  队列是一种只允许前端(front)进行删除操作,而在后端(rear)进行插入操作的数据结构。最先插入的元素将是最先被删除的元素,反之最后插入的元素将是最后被删除的元素。因此队列的特性是先进先出。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#define ERROR   -1000   // status code: operation failed
#define SUCCESS 1000    // status code: operation succeeded

#define STATUS int      // return type of status-returning queue operations

// 节点结构 -- queue node carrying an opaque caller payload.
typedef struct _node {
void *val;              // caller-supplied payload pointer
struct _node *next;     // next node toward the rear
                        // NOTE(review): Queue_Append leaves the tail node's
                        // `next` uninitialized -- confirm nothing walks past rear
}Node;

// Queue header: FIFO -- dequeue at `front`, enqueue at `rear`.
typedef struct _queue {
int length;             // number of queued nodes
struct _node *front;    // next node to dequeue, or NULL when empty
struct _node *rear;     // most recently enqueued node, or NULL when empty
}QueueList;

// Allocates an empty queue header; returns NULL when allocation fails.
QueueList* Queue_Create() {
    QueueList *queue = (QueueList *)malloc(sizeof(QueueList));
    if (queue == NULL) {
        return NULL;
    }
    queue->length = 0;
    queue->front = NULL;
    queue->rear = NULL;
    return queue;
}

// Enqueues `val` at the rear of the queue.
// Returns SUCCESS, or ERROR on bad arguments / allocation failure.
STATUS Queue_Append(QueueList *list, void *val) {
    bool ret = list != NULL && val != NULL;
    if (!ret) return ERROR;

    Node *node = (Node *)malloc(sizeof(Node));
    if (!node) return ERROR;

    node->val = val;
    // The original never set `next`, leaving the tail node's link
    // indeterminate; terminate the chain explicitly.
    node->next = NULL;

    if (list->length == 0) {
        list->front = node;
        list->rear = node;
    } else {
        list->rear->next = node;
        list->rear = node;
    }
    list->length++;

    return SUCCESS;
}

// Dequeues and returns the front payload, or NULL when the queue is
// NULL or empty.
void* Queue_Retrieve(QueueList *list) {
    if (list == NULL || list->length == 0) return NULL;

    Node *node = list->front;
    void *ret = node->val;
    list->front = node->next;
    list->length--;

    if (list->length == 0) {
        list->front = NULL;
        list->rear = NULL;
    }

    // The queue owns its nodes (allocated in Queue_Append); the original
    // leaked one node per dequeue.
    free(node);

    return ret;
}

// Returns true when the queue holds no elements.
// NOTE(review): `list` is dereferenced unchecked -- a NULL queue crashes here.
bool Queue_Empty(QueueList *list) {
return list->length == 0;
}

// Drains the queue by retrieving every remaining element.
void Queue_Clear(QueueList *list) {
    while (!Queue_Empty(list)) {
        (void)Queue_Retrieve(list);  // discard the payload
    }
}

// Drains all remaining elements, then frees the queue header itself.
// NOTE(review): passing NULL crashes inside Queue_Clear/Queue_Empty.
void Queue_Destroy(QueueList *list) {
Queue_Clear(list);
free(list);
}


@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad {
[super viewDidLoad];

QueueList *list = Queue_Create();

Queue_Append(list, (void *)12);
Queue_Append(list, (void *)29);
Queue_Append(list, (void *)82);
Queue_Append(list, (void *)35);
Queue_Append(list, (void *)73);

int total = list->length;
for (int i = 0; i < total; i++) {
int n = (int)Queue_Retrieve(list);
NSLog(@"出队列:%d", n);
}

Queue_Destroy(list);
}

@end

重温数据结构之链表

发表于 2018-04-23 | 分类于 数据结构

  数据结构是用来存放和管理各种数据(比如插入、删除、查找和更新等)的一种程序结构,常见的数据结构有数组、字符串、链表、队列、栈、树、HASH表和图等。
  而算法是指解决一个问题的方法及其实现。算法可以理解为由基本运算及规定的运算顺序所构成的完整的解题步骤。比如各种排序方法,折半查找都是常见的算法。算法的判断标准包括时间复杂度和空间复杂度。

0x01 时间复杂度和空间复杂度

1. 时间复杂度

  算法的时间复杂度是指执行算法所需的计算工作量。一个算法花费的时间与算法中语句的执行次数成正比例,哪个算法中语句执行次数多,它花费时间就多。一个算法中的语句执行次数称为语句频度或时间频度,记为T(n),其中n为问题规模。
  一般情况下,算法中基本操作重复执行的次数是问题规模n的某个函数。如T(n)=4n2+5n,或者8n2+1,那么时间复杂度都为:O(n2),取最高幂,并去掉系数。
  常见的时间复杂度有:常数阶O(1),对数阶O(logn),线性阶O(n),线性对数阶O(nlogn),平方阶O(n2),立方阶O(n3),…,k次方阶O(nk),指数阶O(2n)。随着问题规模n的不断增大,上述时间复杂度不断增大,算法的执行效率越低。
  计算的方法是看看有几重循环,只有一重则时间复杂度为O(n),只有二重则时间复杂度为O(n2),以此类推。如果有二分则为O(logn),二分例如快速排序或二分查找,如果一个for循环套一个二分,那么时间复杂度则为O(nlogn)。

2. 空间复杂度

  算法的空间复杂度是指算法需要消耗的内存空间。其计算和表示方法与时间复杂度类似,一般都用复杂度的渐近性来表示。同时间复杂度相比,空间复杂度的分析要简单的多。程序在运行时候动态分配的空间,以及递归栈所需的空间等。这部分的空间大小与算法有关,而那些静态的空间, 比如代码、常量以及简单的变量与空间复杂度无关。

0x01 单链表

  链表通常由一连串节点组成,每个节点包含任意的实例数据和一个用来指向下一个节点地址的指针(next指针)。
  使用链表结构可以克服数组需要预先知道数据大小的缺点,链表结构可以充分利用计算机内存空间,实现灵活的内存动态管理。但是链表失去了数组随机读取的优点,同时链表由于增加了节点的指针域,空间开销相对比较大。
  链表在插入的时候可以达到O(1)的复杂度,但是在查找一个节点或者访问特定编号的节点需要O(n)的时间。
  单链表源码如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
#define ERROR   -1000   // status code: operation failed
#define SUCCESS 1000    // status code: operation succeeded

#define STATUS int      // return type of status-returning list operations

// 节点结构 -- singly linked list node.
typedef struct _node {
    int val;
    struct _node *next;   // next node, or NULL at the tail
}Node;

// 链表结构 -- list header. The list functions cast NodeList* to Node* and use
// the header's second slot as the head pointer, so the layout MUST mirror
// Node (int + pointer). The original declared only `length`, so that cast
// read past the allocated struct — undefined behavior.
typedef struct _nodeList {
    int length;           // occupies the same slot as Node.val
    struct _node *head;   // occupies the same slot as Node.next (list head)
}NodeList;

// Creates an empty list header; returns NULL when allocation fails.
NodeList* CreateList() {
    // calloc zero-fills the header, so `length` is 0 and any pointer slot
    // sharing the Node layout starts as NULL (the original malloc left the
    // head slot indeterminate).
    NodeList *list = (NodeList *)calloc(1, sizeof(NodeList));
    return list;
}

// 销毁一个链表 -- frees the list header only; the nodes are caller-owned
// (the demo inserts stack variables) and are not freed here.
void DestroyList(NodeList *list) {
if(!list) return;

free(list);
}

// 清空一个链表 -- empties the list by resetting the length counter.
// NOTE(review): the head link is not reset here, so stale nodes stay
// reachable through the header cast -- confirm this is intended.
void ClearList(NodeList *list) {
if(!list) return;

list->length = 0;
}

// 获取链表长度 -- returns the element count, or ERROR (-1000) for a NULL list.
int GetListLength(NodeList *list) {
if(!list) return ERROR;
return list->length;
}

// 插入节点 -- inserts `node` at index `position` (0 = head); positions past
// the end append at the tail. Returns SUCCESS or ERROR on bad arguments.
// The header is cast to Node* so the walk can treat it as a sentinel whose
// `next` is the head pointer; this relies on NodeList mirroring Node's
// memory layout -- verify the struct declares a pointer after `length`.
STATUS InsertListNode(NodeList *list, Node *node, int position) {
bool status = list != NULL && node != NULL && position >= 0;
if(!status) return ERROR;

// walk to the node after which we splice, stopping early at the tail
Node *currentNode = (Node *)list;
for (int i = 0; i < position && currentNode->next != NULL; i++) {
currentNode = currentNode->next;
}
node->next = currentNode->next;   // splice the new node after currentNode
currentNode->next = node;
list->length++;

return SUCCESS;
}

// 获取某个位置的节点 -- returns the node at index `position` (0 = head), or
// NULL for bad arguments. Positions past the end return the tail's NULL next.
// Uses the same header-as-sentinel cast as InsertListNode.
Node* GetListNode(NodeList *list, int position) {
bool status = list != NULL && position >= 0;
if(!status) return NULL;

Node *node = NULL;

Node *currentNode = (Node *)list;
for (int i = 0; i < position && currentNode->next != NULL; i++) {
currentNode = currentNode->next;
}
node = currentNode->next;   // the node at `position` (sentinel's next = index 0)

return node;
}

// Removes the node at index `position` (0 = head).
// Returns SUCCESS, or ERROR for bad arguments / out-of-range positions.
STATUS DeleteListNode(NodeList *list, int position) {
    // The original required `position > 0`, making the head (index 0)
    // undeletable even though InsertListNode accepts index 0.
    bool status = list != NULL && position >= 0 && position < list->length;
    if (!status) return ERROR;

    Node *currentNode = (Node *)list;
    for (int i = 0; i < position && currentNode->next != NULL; i++) {
        currentNode = currentNode->next;
    }

    Node *node = currentNode->next; // the node to delete
    if (node == NULL) return ERROR; // position past the end of the chain

    currentNode->next = node->next;
    list->length--;

    return SUCCESS;
}

@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad {
[super viewDidLoad];

NodeList *list = CreateList();

struct _node n1;
struct _node n2;
struct _node n3;

n1.val = 1;
n2.val = 2;
n3.val = 3;

InsertListNode(list, (Node *)&n1, 0);
InsertListNode(list, (Node *)&n2, 1);
InsertListNode(list, (Node *)&n3, 2);

for (int i = 0; i < GetListLength(list); i++) {
Node *node = GetListNode(list, i);
NSLog(@"位置:%d的值:%d", i, node->val);
}

DeleteListNode(list, 1);
NSLog(@"**删除后的链表**\n");

for (int i = 0; i < GetListLength(list); i++) {
Node *node = GetListNode(list, i);
NSLog(@"位置:%d的值:%d", i, node->val);
}

DestroyList(list);
}


@end

// 运行结果:
2018-04-23 16:08:08.572926+0800 testData[34765:133946832] 位置:0的值:1
2018-04-23 16:08:08.573132+0800 testData[34765:133946832] 位置:1的值:2
2018-04-23 16:08:08.575485+0800 testData[34765:133946832] 位置:2的值:3
2018-04-23 16:08:08.575660+0800 testData[34765:133946832] **删除后的链表**

2018-04-23 16:08:08.575789+0800 testData[34765:133946832] 位置:0的值:1
2018-04-23 16:08:08.575915+0800 testData[34765:133946832] 位置:1的值:3

0x02 循环链表

  循环链表即将链表中最后一个元素的next指针指向第一个元素。这样的链表,其任何节点都可以当做头结点,当第一个节点被重复访问时表示遍历结束。
  源码将以约瑟夫问题展现,既:n个人围成一个圆圈,首先第1个人从1开始一个人一个人顺时针报数,报到第m个人,令其出列。然后再从下一个人开始从1顺时针报数,报到第m个人,再令 其出列,…,如此下去,求出列顺序。为了解决这个问题,我们定义一个浮标

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
#define ERROR   -1000   // status code: operation failed
#define SUCCESS 1000    // status code: operation succeeded

#define STATUS int      // return type of status-returning list operations

// 节点结构 -- circular-list node.
typedef struct _node {
int val;
struct _node *next;   // next node around the ring
}Node;

// 链表结构 -- list header. Deliberately mirrors Node's layout (int + pointer)
// so that casting NodeList* to Node* makes `slider` act as the header's
// `next`, i.e. the head pointer, inside the traversal loops.
typedef struct _nodeList {
int length;
Node *slider; // 浮标 -- roving cursor; doubles as the head link via the cast above
}NodeList;

// 创建一个空链表
NodeList* CreateList() {
NodeList *list = (NodeList *)malloc(sizeof(NodeList));
if(list != NULL) {
list->length = 0;
list->slider = NULL;
}
return list;
}

// 销毁一个链表
void DestroyList(NodeList *list) {
if(!list) return;

free(list);
}

// 清空一个链表
void ClearList(NodeList *list) {
if(!list) return;

list->length = 0;
list->slider = NULL;
}

// 获取链表长度
int GetListLength(NodeList *list) {
if(!list) return ERROR;
return list->length;
}

// 获取某个位置的节点
Node* GetListNode(NodeList *list, int position) {
bool status = list != NULL && position >= 0;
if(!status) return NULL;

Node *node = NULL;

Node *currentNode = (Node *)list;
for (int i = 0; i < position && currentNode->next != NULL; i++) {
currentNode = currentNode->next;
}
node = currentNode->next;

return node;
}

// 插入节点 -- inserts `node` at index `position` into the circular list.
// On the first insert, assigning `slider` also sets the header's punned
// head link (slider occupies the Node.next slot of the cast header), which
// is what closes the ring below.
STATUS InsertListNode(NodeList *list, Node *node, int position) {
bool status = list != NULL && node != NULL && position >= 0;
if(!status) return ERROR;

if(list->length == 0) {
list->slider = node;
}

// list结构体和node结构体相似,所以currentNode这时候的next值跟list的slider一样。所以链表循环自此形成
// (header and node share one layout: the header's `next` IS `slider`,
// so after the first insert the walk below sees a non-NULL head)
Node *currentNode = (Node *)list;
for (int i = 0; i < position && currentNode->next != NULL; i++) {
currentNode = currentNode->next;
}
node->next = currentNode->next;
currentNode->next = node;
list->length++;

// 如果插入在第一个位置,需要重新将最后一个节点的next指针指向这个新的第一个节点
// (inserting at the head: re-aim the last node's next at the new head
// to keep the ring closed)
if(currentNode == (Node *)list) {
Node *lastNode = GetListNode(list, list->length - 1);
lastNode->next = node;
}

return SUCCESS;
}

// Removes the node at index `position` from the circular list, keeping the
// ring closed and the slider valid.
STATUS DeleteListNode(NodeList *list, int position) {
    bool status = list != NULL && position >= 0 && list->length > 0;
    if (!status) return ERROR;

    Node *currentNode = (Node *)list;
    for (int i = 0; i < position && currentNode->next != NULL; i++) {
        currentNode = currentNode->next;
    }

    Node *node = currentNode->next; // the node to delete
    if (node == NULL) return ERROR;

    currentNode->next = node->next;
    list->length--;

    if (list->length == 0) {
        list->slider = NULL;
    } else {
        // Head deleted: re-aim the last node's next at the new head.
        // The original compared `firstNode == node` where firstNode was the
        // (Node *)list header — that can never equal a real node, so the
        // ring was left pointing at the deleted head. The correct test is
        // whether the walk stopped at the header (position 0).
        if (currentNode == (Node *)list) {
            Node *lastNode = GetListNode(list, list->length - 1);
            lastNode->next = node->next;
        }

        // Slider deleted: advance it. Checked independently of the head
        // case — the deleted head may also be the slider.
        if (node == list->slider) {
            list->slider = node->next;
        }
    }

    return SUCCESS;
}

// 删除指定节点
STATUS DeleteListSpecialNode(NodeList *list, Node *node) {
bool status = list != NULL && node != NULL;
if(!status) return ERROR;

Node *currentNode = (Node *)list;
int i = 0;
for (i = 0; i < list->length && currentNode->next != NULL; i++) {
if(currentNode->next == node) break;

currentNode = currentNode->next;
}

return DeleteListNode(list, i);
}

// 重置浮标
void ResetListSlider(NodeList *list) {
if(list == NULL) return;

list->slider = ((Node *)list)->next;
}

// 获取浮标指向的节点
Node* GetListSlider(NodeList *list) {
if(list == NULL) return NULL;

return list->slider;
}

// 将浮标指向下一个节点
Node* NextListSlider(NodeList *list) {
if(list == NULL) return NULL;

Node *node = list->slider;

if(list->slider == NULL){
ResetListSlider(list);
}else{
list->slider = list->slider->next;
}
return node;
}

@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad {
[super viewDidLoad];

NodeList *list = CreateList();

struct _node n1;
struct _node n2;
struct _node n3;
struct _node n4;
struct _node n5;
struct _node n6;
struct _node n7;
struct _node n8;

n1.val = 1;
n2.val = 2;
n3.val = 3;
n4.val = 4;
n5.val = 5;
n6.val = 6;
n7.val = 7;
n8.val = 8;

InsertListNode(list, (Node *)&n1, GetListLength(list));
InsertListNode(list, (Node *)&n2, GetListLength(list));
InsertListNode(list, (Node *)&n3, GetListLength(list));
InsertListNode(list, (Node *)&n4, GetListLength(list));
InsertListNode(list, (Node *)&n5, GetListLength(list));
InsertListNode(list, (Node *)&n6, GetListLength(list));
InsertListNode(list, (Node *)&n7, GetListLength(list));
InsertListNode(list, (Node *)&n8, GetListLength(list));

for (int i = 0; i < GetListLength(list); i++) {
Node *node = GetListNode(list, i);
NSLog(@"位置:%d的值:%d", i, node->val);
}

NSLog(@"------------\n");
ResetListSlider(list);

while (GetListLength(list) > 0) {
for (int i = 1; i < 3; i++) {
NextListSlider(list);
}
Node *node = GetListSlider(list);
DeleteListSpecialNode(list, node);
NSLog(@"出列:%d", node->val);
}

DestroyList(list);
}

@end

// 运行结果:
2018-04-24 09:22:22.752025+0800 testData[39552:141135212] 位置:0的值:1
2018-04-24 09:22:22.752217+0800 testData[39552:141135212] 位置:1的值:2
2018-04-24 09:22:22.752373+0800 testData[39552:141135212] 位置:2的值:3
2018-04-24 09:22:22.752499+0800 testData[39552:141135212] 位置:3的值:4
2018-04-24 09:22:22.752622+0800 testData[39552:141135212] 位置:4的值:5
2018-04-24 09:22:22.752755+0800 testData[39552:141135212] 位置:5的值:6
2018-04-24 09:22:22.752875+0800 testData[39552:141135212] 位置:6的值:7
2018-04-24 09:22:22.752998+0800 testData[39552:141135212] 位置:7的值:8
2018-04-24 09:22:22.753101+0800 testData[39552:141135212] ------------
2018-04-24 09:22:22.753233+0800 testData[39552:141135212] 出列:3
2018-04-24 09:22:22.753356+0800 testData[39552:141135212] 出列:6
2018-04-24 09:22:22.753494+0800 testData[39552:141135212] 出列:1
2018-04-24 09:22:22.753635+0800 testData[39552:141135212] 出列:4
2018-04-24 09:22:22.753787+0800 testData[39552:141135212] 出列:7
2018-04-24 09:22:22.753925+0800 testData[39552:141135212] 出列:2
2018-04-24 09:22:22.754040+0800 testData[39552:141135212] 出列:5
2018-04-24 09:22:22.754307+0800 testData[39552:141135212] 出列:8

0x03 双向链表

  双向链表可以解决单链表无法直接访问前驱元素的问题,单链表的逆序访问是一个极其耗时的操作。所以在双链表中,新增一个指向前驱的指针。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
#define ERROR   -1000
#define SUCCESS 1000

#define STATUS int

// 节点结构
typedef struct _node {
int val;
struct _node *next;
struct _node *pre;
}Node;

// 链表结构
typedef struct _nodeList {
int length;
Node *slider; // 浮标
}NodeList;

// 创建一个空链表
NodeList* CreateList() {
NodeList *list = (NodeList *)malloc(sizeof(NodeList));
if(list != NULL) {
list->length = 0;
list->slider = NULL;
}
return list;
}

// 销毁一个链表
void DestroyList(NodeList *list) {
if(!list) return;

free(list);
}

// 清空一个链表
void ClearList(NodeList *list) {
if(!list) return;

list->length = 0;
list->slider = NULL;
}

// 获取链表长度
int GetListLength(NodeList *list) {
if(!list) return ERROR;
return list->length;
}

// 获取某个位置的节点
Node* GetListNode(NodeList *list, int position) {
bool status = list != NULL && position >= 0;
if(!status) return NULL;

Node *node = NULL;

Node *currentNode = (Node *)list;
for (int i = 0; i < position && currentNode->next != NULL; i++) {
currentNode = currentNode->next;
}
node = currentNode->next;

return node;
}

// 插入节点 -- inserts `node` at index `position` into the circular doubly
// linked list, maintaining both next and pre links.
STATUS InsertListNode(NodeList *list, Node *node, int position) {
bool status = list != NULL && node != NULL && position >= 0;
if(!status) return ERROR;

if(list->length == 0) {
list->slider = node;
}

// list结构体和node结构体相似,所以currentNode这时候的next值跟list的slider一样。所以链表循环自此形成
// (header and node share a layout prefix: the header's `next` IS `slider`,
// so the first insert above makes currentNode->next non-NULL here -- the
// line below would otherwise dereference NULL on an empty list)
Node *currentNode = (Node *)list;
for (int i = 0; i < position && currentNode->next != NULL; i++) {
currentNode = currentNode->next;
}
node->next = currentNode->next;
currentNode->next->pre = node;
currentNode->next = node;
node->pre = currentNode;  // NOTE(review): for a head insert this briefly
                          // points at the header-as-node; fixed up below
list->length++;

// 如果插入在第一个位置,需要重新将最后一个节点的next指针指向这个新的第一个节点
// (head insert: close the ring in both directions via the last node)
if(currentNode == (Node *)list) {
Node *lastNode = GetListNode(list, list->length - 1);
lastNode->next = node;
node->pre = lastNode;
}

return SUCCESS;
}

// Removes the node at index `position` from the circular doubly linked
// list, maintaining both next and pre links and the slider.
STATUS DeleteListNode(NodeList *list, int position) {
    bool status = list != NULL && position >= 0 && list->length > 0;
    if (!status) return ERROR;

    Node *currentNode = (Node *)list;
    for (int i = 0; i < position && currentNode->next != NULL; i++) {
        currentNode = currentNode->next;
    }

    Node *node = currentNode->next; // the node to delete
    if (node == NULL) return ERROR;

    currentNode->next = node->next;
    node->next->pre = currentNode;

    list->length--;

    if (list->length == 0) {
        list->slider = NULL;
    } else {
        // Head deleted: re-close the ring through the last node. The
        // original compared `firstNode == node` where firstNode was the
        // (Node *)list header — never equal to a real node, so this branch
        // was dead code and the ring kept pointing at the deleted head.
        if (currentNode == (Node *)list) {
            Node *lastNode = GetListNode(list, list->length - 1);
            lastNode->next = node->next;
            node->next->pre = lastNode;   // keep the backward link consistent
        }

        // Slider deleted: advance it (independent of the head case).
        if (node == list->slider) {
            list->slider = node->next;
        }
    }

    return SUCCESS;
}

// 删除指定节点
STATUS DeleteListSpecialNode(NodeList *list, Node *node) {
bool status = list != NULL && node != NULL;
if(!status) return ERROR;

Node *currentNode = (Node *)list;
int i = 0;
for (i = 0; i < list->length && currentNode->next != NULL; i++) {
if(currentNode->next == node) break;

currentNode = currentNode->next;
}

return DeleteListNode(list, i);
}

// 重置游标
void ResetListSlider(NodeList *list) {
if(list == NULL) return;

list->slider = ((Node *)list)->next;
}

// 获取游标指向的节点
Node* GetListSlider(NodeList *list) {
if(list == NULL) return NULL;

return list->slider;
}

// 将游标指向下一个节点
Node* NextListSlider(NodeList *list) {
if(list == NULL) return NULL;

Node *node = list->slider;

if(list->slider == NULL){
ResetListSlider(list);
}else{
list->slider = list->slider->next;
}
return node;
}

@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad {
[super viewDidLoad];

NodeList *list = CreateList();

struct _node n1;
struct _node n2;
struct _node n3;
struct _node n4;
struct _node n5;
struct _node n6;
struct _node n7;
struct _node n8;

n1.val = 1;
n2.val = 2;
n3.val = 3;
n4.val = 4;
n5.val = 5;
n6.val = 6;
n7.val = 7;
n8.val = 8;

InsertListNode(list, (Node *)&n1, GetListLength(list));
InsertListNode(list, (Node *)&n2, GetListLength(list));
InsertListNode(list, (Node *)&n3, GetListLength(list));
InsertListNode(list, (Node *)&n4, GetListLength(list));
InsertListNode(list, (Node *)&n5, GetListLength(list));
InsertListNode(list, (Node *)&n6, GetListLength(list));
InsertListNode(list, (Node *)&n7, GetListLength(list));
InsertListNode(list, (Node *)&n8, GetListLength(list));

for (int i = 0; i < GetListLength(list); i++) {
Node *node = GetListNode(list, i);
NSLog(@"出列:%d,其前值:%d,其后值:%d", node->val, node->pre->val, node->next->val);
}

DestroyList(list);
}

@end

// 运行结果:
2018-04-24 10:38:34.810183+0800 testData[48305:141728412] 出列:1,其前值:8,其后值:2
2018-04-24 10:38:34.810361+0800 testData[48305:141728412] 出列:2,其前值:1,其后值:3
2018-04-24 10:38:34.810465+0800 testData[48305:141728412] 出列:3,其前值:2,其后值:4
2018-04-24 10:38:34.810589+0800 testData[48305:141728412] 出列:4,其前值:3,其后值:5
2018-04-24 10:38:34.810699+0800 testData[48305:141728412] 出列:5,其前值:4,其后值:6
2018-04-24 10:38:34.810818+0800 testData[48305:141728412] 出列:6,其前值:5,其后值:7
2018-04-24 10:38:34.810910+0800 testData[48305:141728412] 出列:7,其前值:6,其后值:8
2018-04-24 10:38:34.811000+0800 testData[48305:141728412] 出列:8,其前值:7,其后值:1

  那么,如果不通过双向链表,一个单链表怎么反转呢?

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
for (int i = len-1; i >= 0; i--) {
Node *node = GetListNode(list, i);
NSLog(@"出列:%d", node->val);
}

// 运行结果:
2018-04-24 10:56:29.439705+0800 testData[50135:141858946] 出列:1
2018-04-24 10:56:29.439830+0800 testData[50135:141858946] 出列:2
2018-04-24 10:56:29.439925+0800 testData[50135:141858946] 出列:3
2018-04-24 10:56:29.440032+0800 testData[50135:141858946] 出列:4
2018-04-24 10:56:29.440128+0800 testData[50135:141858946] 出列:5
2018-04-24 10:56:29.440233+0800 testData[50135:141858946] 出列:6
2018-04-24 10:56:29.440325+0800 testData[50135:141858946] 出列:7
2018-04-24 10:56:29.440432+0800 testData[50135:141858946] 出列:8
2018-04-24 10:56:29.440512+0800 testData[50135:141858946] ----开始反转-----
2018-04-24 10:56:29.440594+0800 testData[50135:141858946] 出列:8
2018-04-24 10:56:29.440667+0800 testData[50135:141858946] 出列:7
2018-04-24 10:56:29.440741+0800 testData[50135:141858946] 出列:6
2018-04-24 10:56:29.440815+0800 testData[50135:141858946] 出列:5
2018-04-24 10:56:29.440888+0800 testData[50135:141858946] 出列:4
2018-04-24 10:56:29.441030+0800 testData[50135:141858946] 出列:3
2018-04-24 10:56:29.441216+0800 testData[50135:141858946] 出列:2
2018-04-24 10:56:29.441386+0800 testData[50135:141858946] 出列:1

  这样就完成了这个需求。

多线程中锁的应用

发表于 2018-04-06 | 分类于 iOS

  在多线程运用中,我们经常会遇到资源竞争问题。比如多个线程同时往一个文件写入,这种情况是不被允许的,会造成安全隐患。正常的做法是一个线程写完后,下一个线程才能写入。而在多线程中,这一项技术叫做线程同步技术。

  那么在iOS中,有哪些方案可以使用?

  • pthread_mutex
  • NSLock、NSRecursiveLock
  • NSCondition、NSConditionLock

  • os_unfair_lock(替换被废弃的OSSpinLock,iOS 10+)

  • @synchronized
  • dispatch_semaphore
  • 串行队列

0x01 pthread_mutex

  也叫互斥锁,以休眠来等待锁被解开。以经典的“存取钱”为例,我们看下如何使用pthread_mutex控制线程同步问题。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
static pthread_mutex_t g_mutex;

- (void)drawMoney {
pthread_mutex_lock(&g_mutex);
NSLog(@"取钱了...");
sleep(2);
pthread_mutex_unlock(&g_mutex);
}

- (void)saveMoney {
pthread_mutex_lock(&g_mutex);
NSLog(@"存钱了...");
sleep(2);
pthread_mutex_unlock(&g_mutex);
}

- (void)initLock {
    // Configure a plain (non-recursive) mutex.
    pthread_mutexattr_t attri;
    pthread_mutexattr_init(&attri);
    pthread_mutexattr_settype(&attri, PTHREAD_MUTEX_NORMAL);

    // Initialize the global IN PLACE. POSIX leaves the behavior of a copied
    // pthread_mutex_t undefined — the original initialized a local mutex and
    // assigned it to g_mutex, which is exactly that undefined copy.
    pthread_mutex_init(&g_mutex, &attri);

    // The attribute object is no longer needed once the mutex is initialized.
    pthread_mutexattr_destroy(&attri);
}

- (void)viewDidLoad {
[super viewDidLoad];

[self initLock];

for (NSInteger i = 0; i < 50; i++) {
[self performSelectorInBackground:@selector(drawMoney) withObject:nil];
[self performSelectorInBackground:@selector(saveMoney) withObject:nil];
}
}

- (void)dealloc {
pthread_mutex_destroy(&g_mutex);
}

  需要注意的是,使用pthread_mutex需要手动进行销毁。

  如下代码,我们进行递归调用的时候会发生什么?

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
- (void)recursiveTest {
pthread_mutex_lock(&g_mutex);
NSLog(@"递归测试...");
[self recursiveTest];
pthread_mutex_unlock(&g_mutex);
}

- (void)initLock {
    // Configure a plain (non-recursive) mutex.
    pthread_mutexattr_t attri;
    pthread_mutexattr_init(&attri);
    pthread_mutexattr_settype(&attri, PTHREAD_MUTEX_NORMAL);

    // Initialize the global IN PLACE: copying an initialized pthread_mutex_t
    // (as the original did via a local temporary) is undefined behavior.
    pthread_mutex_init(&g_mutex, &attri);

    pthread_mutexattr_destroy(&attri);
}

- (void)viewDidLoad {
[super viewDidLoad];

[self initLock];

for (NSInteger i = 0; i < 50; i++) {
[self performSelectorInBackground:@selector(recursiveTest) withObject:nil];
}
}

  由于递归调用,第一次进入recursiveTest的时候调用pthread_mutex_lock加锁,然后在临界区(lock与unlock之间的代码称为临界区)里又调用了自己,然而之前锁已经加上了,所以只能休眠等待解锁,但线程在等待期间无法继续往下执行,就造成不能执行到pthread_mutex_unlock解锁,这样就导致锁一直不能放开,所以造成了死锁。

  解决这样的问题,我们可以使用递归锁。初始化属性的时候把PTHREAD_MUTEX_NORMAL改为PTHREAD_MUTEX_RECURSIVE。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
- (void)initLock {
    // Configure a RECURSIVE mutex so the same thread may re-lock it
    // (this is what makes the recursive call below safe).
    pthread_mutexattr_t attri;
    pthread_mutexattr_init(&attri);
    pthread_mutexattr_settype(&attri, PTHREAD_MUTEX_RECURSIVE);

    // Initialize the global IN PLACE: copying an initialized pthread_mutex_t
    // (as the original did via a local temporary) is undefined behavior.
    pthread_mutex_init(&g_mutex, &attri);

    pthread_mutexattr_destroy(&attri);
}

- (void)recursiveTest {
pthread_mutex_lock(&g_mutex);
NSLog(@"递归测试...");
[self recursiveTest];
pthread_mutex_unlock(&g_mutex);
}

- (void)viewDidLoad {
[super viewDidLoad];

[self initLock];

for (NSInteger i = 0; i < 50; i++) {
[self performSelectorInBackground:@selector(recursiveTest) withObject:nil];
}
}

  我们有时需要加锁的前提是满足某些条件,比如现在有这么一个需求,同时依次调用三个接口,调用完毕后合并这三个接口得到的数据,我们可以实现为

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
static pthread_mutex_t g_mutex;
static pthread_cond_t g_cond;

- (void)initLock {
    // Configure a default-type mutex.
    pthread_mutexattr_t attri;
    pthread_mutexattr_init(&attri);
    pthread_mutexattr_settype(&attri, PTHREAD_MUTEX_DEFAULT);

    // Initialize BOTH globals in place. POSIX leaves copying an initialized
    // pthread_mutex_t / pthread_cond_t undefined — the original initialized
    // local temporaries and assigned them to g_mutex / g_cond.
    pthread_cond_init(&g_cond, NULL);
    pthread_mutex_init(&g_mutex, &attri);

    pthread_mutexattr_destroy(&attri);
}

- (void)loadNetworkData {
pthread_mutex_lock(&g_mutex);
NSLog(@"请求数据...");
sleep(2);
pthread_cond_signal(&g_cond);
pthread_mutex_unlock(&g_mutex);
}

- (void)mergeData {
    // pthread_cond_wait REQUIRES the caller to hold the mutex: it atomically
    // releases it while waiting and re-acquires it before returning. The
    // original called it without locking, which is undefined behavior.
    pthread_mutex_lock(&g_mutex);
    pthread_cond_wait(&g_cond, &g_mutex);
    NSLog(@"合并数据...");
    pthread_mutex_unlock(&g_mutex);
}

- (void)viewDidLoad {
[super viewDidLoad];

[self initLock];
[self performSelectorInBackground:@selector(mergeData) withObject:nil];
for (NSInteger i = 0; i < 3; i++) {
[self performSelectorInBackground:@selector(loadNetworkData) withObject:nil];
}
}

- (void)dealloc {
pthread_mutex_destroy(&g_mutex);
pthread_cond_destroy(&g_cond);
}

0x02 NSLock、NSRecursiveLock、NSCondition、NSConditionLock

  这几个Lock都是基于mutex的封装。这里只对NSConditionLock做代码示例,其他的api已经够简单明了了,所以不做详细示例。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
- (void)initLock {
self.conditionLock = [[NSConditionLock alloc] initWithCondition:1];
}

- (void)test1 {
[self.conditionLock lockWhenCondition:1];
NSLog(@"测试1...");
sleep(2);
[self.conditionLock unlockWithCondition:2];
}

- (void)test2 {
[self.conditionLock lockWhenCondition:2];
NSLog(@"测试2...");
sleep(2);
[self.conditionLock unlockWithCondition:3];
}

- (void)test3 {
[self.conditionLock lockWhenCondition:3];
NSLog(@"测试3...");
sleep(2);
[self.conditionLock unlock];
}

- (void)viewDidLoad {
[super viewDidLoad];

[self initLock];

[self performSelectorInBackground:@selector(test3) withObject:nil];
[self performSelectorInBackground:@selector(test2) withObject:nil];
[self performSelectorInBackground:@selector(test1) withObject:nil];
}

// 运行结果
2018-04-06 14:45:54.648160+0800 testData[53895:31297465] 测试1...
2018-04-06 14:45:56.654220+0800 testData[53895:31297464] 测试2...
2018-04-06 14:45:58.731368+0800 testData[53895:31297463] 测试3...

  NSConditionLock可以设置多任务间的依赖关系。

0x03 os_unfair_lock

  作为替代OSSpinLock的方案,曾一度以为也是自旋锁,但是查看汇编后调用了睡眠函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
- (void)viewDidLoad {
[super viewDidLoad];

[self initLock];

for (NSInteger i = 0; i < 50; i++) {
[self performSelectorInBackground:@selector(drawMoney) withObject:nil];
[self performSelectorInBackground:@selector(saveMoney) withObject:nil];
}
}

- (void)drawMoney {
os_unfair_lock_lock(&_lock);
NSLog(@"取钱了...");
sleep(2);
os_unfair_lock_unlock(&_lock);
}

- (void)saveMoney {
os_unfair_lock_lock(&_lock);
NSLog(@"存钱了...");
sleep(2);
os_unfair_lock_unlock(&_lock);
}

- (void)initLock {
self.lock = OS_UNFAIR_LOCK_INIT;
}

  查看汇编

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
libsystem_platform.dylib`os_unfair_lock_lock:
-> 0x108edd616 <+0>: movl %gs:0x18, %esi
0x108edd61e <+8>: xorl %eax, %eax
0x108edd620 <+10>: lock
0x108edd621 <+11>: cmpxchgl %esi, (%rdi)
0x108edd624 <+14>: jne 0x108edd627 ; <+17>
0x108edd626 <+16>: retq
0x108edd627 <+17>: xorl %edx, %edx
; 调用_os_unfair_lock_lock_slow
0x108edd629 <+19>: jmp 0x108edd62e ; _os_unfair_lock_lock_slow


libsystem_platform.dylib`_os_unfair_lock_lock_slow:
.......
; 调用__ulock_wait
0x108edd6b5 <+135>: callq 0x108edf3b8 ; symbol stub for: __ulock_wait

; 进入睡眠等待
libsystem_kernel.dylib`__ulock_wait:
-> 0x108eb6158 <+0>: movl $0x2000203, %eax ; imm = 0x2000203
0x108eb615d <+5>: movq %rcx, %r10
0x108eb6160 <+8>: syscall
0x108eb6162 <+10>: jae 0x108eb616c ; <+20>
0x108eb6164 <+12>: movq %rax, %rdi
0x108eb6167 <+15>: jmp 0x108eacb00 ; cerror_nocancel
0x108eb616c <+20>: retq
0x108eb616d <+21>: nop
0x108eb616e <+22>: nop
0x108eb616f <+23>: nop

0x04 @synchronized

  @synchronized也是对mutex的封装。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
// "Withdraw" task guarded by @synchronized on self; per the SyncData
// definition shown later, this is backed by a recursive mutex.
- (void)drawMoney {
@synchronized(self) {
NSLog(@"取钱了...");
sleep(2); // hold the monitor for 2s so serialization is observable
}
}

// "Deposit" task: synchronizes on the same object (self) as -drawMoney,
// so the two critical sections are mutually exclusive.
- (void)saveMoney {
@synchronized(self) {
NSLog(@"存钱了...");
sleep(2); // hold the monitor for 2s to make contention visible
}
}

// Demo entry point for the @synchronized example: spawns 50 pairs of
// background tasks that contend on @synchronized(self).
- (void)viewDidLoad {
[super viewDidLoad];

for (NSInteger i = 0; i < 50; i++) {
// Each call runs the selector on its own freshly created background thread.
[self performSelectorInBackground:@selector(drawMoney) withObject:nil];
[self performSelectorInBackground:@selector(saveMoney) withObject:nil];
}

}

  使用clang转换代码

xcrun -sdk iphoneos clang -arch arm64 -rewrite-objc -fobjc-arc -fobjc-runtime=ios-8.0.0 ViewController.m

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
static void _I_ViewController_drawMoney(ViewController * self, SEL _cmd) {
{
id _rethrow = 0;
id _sync_obj = (id)self;
objc_sync_enter(_sync_obj);
try {
struct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}
~_SYNC_EXIT() {objc_sync_exit(sync_exit);}
id sync_exit;
} _sync_exit(_sync_obj);

NSLog((NSString *)&__NSConstantStringImpl__var_folders_fg_0_j9qb4d4fn9_wnf3fp9q40ssw1_56_T_ViewController_bc9092_mi_0);
sleep(2);
} catch (id e) {_rethrow = e;}
{ struct _FIN { _FIN(id reth) : rethrow(reth) {}
~_FIN() { if (rethrow) objc_exception_throw(rethrow); }
id rethrow;
} _fin_force_rethow(_rethrow);}
}

}


static void _I_ViewController_saveMoney(ViewController * self, SEL _cmd) {
{ id _rethrow = 0; id _sync_obj = (id)self; objc_sync_enter(_sync_obj);
try {
struct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}
~_SYNC_EXIT() {objc_sync_exit(sync_exit);}
id sync_exit;
} _sync_exit(_sync_obj);

NSLog((NSString *)&__NSConstantStringImpl__var_folders_fg_0_j9qb4d4fn9_wnf3fp9q40ssw1_56_T_ViewController_bc9092_mi_1);
sleep(2);
} catch (id e) {_rethrow = e;}
{ struct _FIN { _FIN(id reth) : rethrow(reth) {}
~_FIN() { if (rethrow) objc_exception_throw(rethrow); }
id rethrow;
} _fin_force_rethow(_rethrow);}
}

}

  转换后的代码,我们可以看到@synchronized内部其实调用了objc_sync_enter和objc_sync_exit。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
int objc_sync_enter(id obj)
{
int result = OBJC_SYNC_SUCCESS;

if (obj) {
SyncData* data = id2data(obj, ACQUIRE);
assert(data);
data->mutex.lock();
} else {
// @synchronized(nil) does nothing
if (DebugNilSync) {
_objc_inform("NIL SYNC DEBUG: @synchronized(nil); set a breakpoint on objc_sync_nil to debug");
}
objc_sync_nil();
}

return result;
}

  为什么说是对mutex的封装,我们看下SyncData的定义

1
2
3
4
5
6
7
8
9
10
11
12
13
typedef struct SyncData {
struct SyncData* nextData;
DisguisedPtr<objc_object> object;
int32_t threadCount; // number of THREADS using this block
recursive_mutex_t mutex;
} SyncData;

using recursive_mutex_t = recursive_mutex_tt<LOCKDEBUG>;

class recursive_mutex_tt : nocopy_t {
pthread_mutex_t mLock;
... ...
}

  我们定义属性的时候,对其原子性经常定义为nonatomic,因为性能等原因很少用到atomic。我们知道如果对OC对象使用atomic,会自动在其getter\setter方法里加锁。我们定义属性如下

1
@property (atomic, strong) NSArray *datas;

  我们汇编查看其setter方法,内部调用了objc_setProperty_atomic

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
testData`-[ViewController setDatas:]:
0x10169fcf0 <+0>: pushq %rbp
0x10169fcf1 <+1>: movq %rsp, %rbp
0x10169fcf4 <+4>: subq $0x20, %rsp
0x10169fcf8 <+8>: movq %rdi, -0x8(%rbp)
0x10169fcfc <+12>: movq %rsi, -0x10(%rbp)
0x10169fd00 <+16>: movq %rdx, -0x18(%rbp)
-> 0x10169fd04 <+20>: movq -0x10(%rbp), %rsi
0x10169fd08 <+24>: movq -0x8(%rbp), %rdx
0x10169fd0c <+28>: movq 0x8445(%rip), %rcx ; ViewController._datas
0x10169fd13 <+35>: movq -0x18(%rbp), %rdi
0x10169fd17 <+39>: movq %rdi, -0x20(%rbp)
0x10169fd1b <+43>: movq %rdx, %rdi
0x10169fd1e <+46>: movq -0x20(%rbp), %rdx
0x10169fd22 <+50>: callq 0x1016a2e9a ; symbol stub for: objc_setProperty_atomic
0x10169fd27 <+55>: addq $0x20, %rsp
0x10169fd2b <+59>: popq %rbp
0x10169fd2c <+60>: retq

  objc_setProperty_atomic内部使用了os_unfair_lock加锁

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset)
{
reallySetProperty(self, _cmd, newValue, offset, true, false, false);
}

static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy)
{
if (offset == 0) {
object_setClass(self, newValue);
return;
}

id oldValue;
id *slot = (id*) ((char*)self + offset);

if (copy) {
newValue = [newValue copyWithZone:nil];
} else if (mutableCopy) {
newValue = [newValue mutableCopyWithZone:nil];
} else {
if (*slot == newValue) return;
newValue = objc_retain(newValue);
}

if (!atomic) {
oldValue = *slot;
*slot = newValue;
} else {// 如果atomic为YES
spinlock_t& slotlock = PropertyLocks[slot];
slotlock.lock();
oldValue = *slot;
*slot = newValue;
slotlock.unlock();
}

objc_release(oldValue);
}

using spinlock_t = mutex_tt<LOCKDEBUG>;

class mutex_tt : nocopy_t {
os_unfair_lock mLock;
... ...
}

  对于像NSMutableArray和NSMutableDictionary,使用atomic虽然对其setter\getter方法进行了加锁,但是在多线程环境下使用addObject:或removeObjectAtIndex:这类对对象内部数据操作的方法,其实还是不安全的。也就是说加锁仅限于读取对象(NSArray *array = self.datas;)和修改对象(self.datas = @[].mutableCopy),但这个对象其内部的方法操作是不安全的。

0x05 dispatch_semaphore和串行队列

  在多线程篇里已经提过,不重复介绍。

浅谈iOS多线程(源码)

发表于 2018-01-16 | 分类于 iOS

0x00 开篇

  上一篇文章,我们对于常用的GCD使用方法大致理了一遍,那么在使用过程中,我们肯定有这样的疑问:队列是怎么被创建的?任务是怎么放进队列的?为什么会造成死锁?等等问题。所以在这一篇文章里,我们会从源码的阅读过程中解答这些疑问。虽然网上也有很多讲解源码的文章,但文章对应的源码都不是最新的,这篇文章是根据最新源码来进行阅读的。
  进入正题之前,我们需要做些准备工作。了解消息的本质

0x01 Mach

  Mach是XNU的核心,被BSD层包装。XNU由以下几个组件组成:

  • MACH内核
    • 进程和线程抽象
    • 虚拟内存管理
    • 任务调度
    • 进程间通信和消息传递机制
  • BSD
    • UNIX进程模型
    • POSIX线程模型
    • UNIX用户与组
    • 网络协议栈
    • 文件系统访问
    • 设备访问
  • libKern
  • I/O Kit

  Mach的独特之处在于选择了通过消息传递的方式实现对象与对象之间的通信。而其他架构一个对象要访问另一个对象需要通过一个大家都知道的接口,而Mach对象不能直接调用另一个对象,而是必须传递消息。

  一条消息就像网络包一样,定义为透明的blob(binary large object,二进制大对象),通过固定的包头进行封装。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
typedef struct
{
mach_msg_header_t header;
mach_msg_body_t body;
} mach_msg_base_t;

typedef struct
{
mach_msg_bits_t msgh_bits; // 消息头标志位
mach_msg_size_t msgh_size; // 大小
mach_port_t msgh_remote_port; // 目标(发消息)或源(接消息)
mach_port_t msgh_local_port; // 源(发消息)或目标(接消息)
mach_port_name_t msgh_voucher_port;
mach_msg_id_t msgh_id; // 唯一id
} mach_msg_header_t;

  Mach消息的发送和接收都是通过同一个API函数mach_msg()进行的。这个函数在用户态和内核态都有实现。为了实现消息的发送和接收,mach_msg()函数调用了一个Mach陷阱(trap)。Mach陷阱就是Mach中和系统调用等同的概念。在用户态调用mach_msg_trap()会引发陷阱机制,切换到内核态,在内核态中,内核实现的mach_msg()会完成实际的工作。这个函数也将会在下面的源码分析中遇到。

  每一个BSD进程都在底层关联一个Mach任务对象,因为Mach提供的都是非常底层的抽象,提供的API从设计上讲很基础且不完整,所以需要在这之上提供一个更高的层次以实现完整的功能。我们开发层遇到的进程和线程就是BSD层对Mach的任务和线程的复杂包装。

  进程填充的是线程,而线程是二进制代码的实际执行单元。用户态的线程始于对pthread_create的调用。这个函数又由bsdthread_create系统调用完成,而bsdthread_create其实是Mach中thread_create的复杂包装,说到底真正的线程创建还是由Mach层完成。

  在UNIX中,进程不能被创建出来,都是通过fork()系统调用复制出来的。复制出来的进程都会被要加载的执行程序覆盖整个内存空间。

  接着,了解下常用的宏和常用的数据结构体。

0x02 源码中常见的宏

1. __builtin_expect

  这个其实是个函数,针对编译器优化的一个函数,后面几个宏是对这个函数的封装,所以提前拎出来说一下。写代码中我们经常会遇到条件判断语句

1
2
3
4
5
if(今天是工作日) {
printf("好好上班");
}else{
printf("好好睡觉");
}

  CPU读取指令的时候并非一条一条地读,而是多条一起加载进来。比如已经加载了if(今天是工作日) printf(“好好上班”);的指令,这时候条件式如果为非,也就是非工作日,那么之前预取的指令就作废了,CPU还要重新把printf(“好好睡觉”);这条指令加载进来,这样就造成了性能浪费的现象。
  __builtin_expect的第一个参数是实际值,第二个参数是预测值。使用这个目的是告诉编译器if条件式是不是有更大的可能被满足。

2. likely和unlikely

  解开这个宏后其实是对__builtin_expect封装,likely表示更大可能成立,unlikely表示更大可能不成立。

1
2
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

  遇到这样的,if(likely(a == 0))理解成if(a==0)即可,unlikely也是同样的。

3. fastpath和slowpath

  跟上面也是差不多的,fastpath表示更大可能成立,slowpath表示更大可能不成立

1
2
#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l))
#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l))

  这两个理解起来跟likely和unlikely一样,只需要关注里面的条件式是否满足即可。

4. os_atomic_cmpxchg

  其内部就是atomic_compare_exchange_strong_explicit函数,这个函数的作用是:第二个参数与第一个参数值比较,如果相等,第三个参数的值替换第一个参数的值。如果不相等,把第一个参数的值赋值到第二个参数上。

1
2
3
4
#define os_atomic_cmpxchg(p, e, v, m) \
({ _os_atomic_basetypeof(p) _r = (e); \
atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
&_r, v, memory_order_##m, memory_order_relaxed); })

5. os_atomic_store2o

  将第三个参数v,保存到第一个参数p中由第二个参数f指定的成员上,即(p)->f = v

1
2
3
#define os_atomic_store2o(p, f, v, m)  os_atomic_store(&(p)->f, (v), m)
#define os_atomic_store(p, v, m) \
atomic_store_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m)

6. os_atomic_inc_orig

  将第一个参数原子地加1,并返回其原来的值

1
2
3
4
5
#define os_atomic_inc_orig(p, m)  os_atomic_add_orig((p), 1, m)
#define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig((p), (v), m, add, +)
#define _os_atomic_c11_op_orig(p, v, m, o, op) \
atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \
memory_order_##m)

0x03 数据结构体

  接着,了解一些常用数据结构体。

1. dispatch_queue_t

1
typedef struct dispatch_queue_s *dispatch_queue_t;

  我们看下dispatch_queue_s怎么定义的。发现其内部有个_DISPATCH_QUEUE_HEADER宏定义。

1
2
3
4
struct dispatch_queue_s {
_DISPATCH_QUEUE_HEADER(queue);
DISPATCH_QUEUE_CACHELINE_PADDING;
} DISPATCH_ATOMIC64_ALIGN;

  解开_DISPATCH_QUEUE_HEADER后发现又一个DISPATCH_OBJECT_HEADER宏定义,继续拆解

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
#define _DISPATCH_QUEUE_HEADER(x) \
struct os_mpsc_queue_s _as_oq[0]; \
DISPATCH_OBJECT_HEADER(x); \
_OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
uint32_t dq_side_suspend_cnt; \
dispatch_unfair_lock_s dq_sidelock; \
union { \
dispatch_queue_t dq_specific_q; \
struct dispatch_source_refs_s *ds_refs; \
struct dispatch_timer_source_refs_s *ds_timer_refs; \
struct dispatch_mach_recv_refs_s *dm_recv_refs; \
}; \
DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
const uint16_t dq_width, \
const uint16_t __dq_opaque \
); \
DISPATCH_INTROSPECTION_QUEUE_HEADER

  还有一层宏_DISPATCH_OBJECT_HEADER

1
2
3
#define DISPATCH_OBJECT_HEADER(x) \
struct dispatch_object_s _as_do[0]; \
_DISPATCH_OBJECT_HEADER(x)

  不熟悉##作用的同学,这里先说明下:##的作用就是把两边的符号拼接起来,比如x为group的话,下面就会拼接为dispatch_group这样的。

1
2
3
4
5
6
7
#define _DISPATCH_OBJECT_HEADER(x) \
struct _os_object_s _as_os_obj[0]; \
OS_OBJECT_STRUCT_HEADER(dispatch_##x); \
struct dispatch_##x##_s *volatile do_next; \
struct dispatch_queue_s *do_targetq; \
void *do_ctxt; \
void *do_finalizer

  来到OS_OBJECT_STRUCT_HEADER之后,我们需要注意一个成员变量,记住这个成员变量名字叫做do_vtable。再继续拆解_OS_OBJECT_HEADER发现里面起就是一个isa指针和引用计数一些信息。

1
2
3
4
5
6
7
8
9
10
11
12
#define OS_OBJECT_STRUCT_HEADER(x) \
_OS_OBJECT_HEADER(\
const void *_objc_isa, \
do_ref_cnt, \
do_xref_cnt); \
// 注意这个成员变量,后面将任务Push到队列就是通过这个变量
const struct x##_vtable_s *do_vtable

#define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \
isa; /* must be pointer-sized */ \
int volatile ref_cnt; \
int volatile xref_cnt

2. dispatch_continuation_t

  说到这个结构体,如果没看过源码的话,肯定对这个结构体很陌生,因为对外的api里面没有跟continuation有关的。所以这里先说下这个结构体就是用来封装block对象的,保存block的上下文环境和block执行函数等。

1
2
3
4
typedef struct dispatch_continuation_s {
struct dispatch_object_s _as_do[0];
DISPATCH_CONTINUATION_HEADER(continuation);
} *dispatch_continuation_t;

  看下里面的宏DISPATCH_CONTINUATION_HEADER

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
#define DISPATCH_CONTINUATION_HEADER(x) \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct dispatch_##x##_s *volatile do_next; \
struct voucher_s *dc_voucher; \
dispatch_function_t dc_func; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other

3. dispatch_object_t

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
typedef union {
struct _os_object_s *_os_obj;
struct dispatch_object_s *_do;
struct dispatch_continuation_s *_dc;
struct dispatch_queue_s *_dq;
struct dispatch_queue_attr_s *_dqa;
struct dispatch_group_s *_dg;
struct dispatch_source_s *_ds;
struct dispatch_mach_s *_dm;
struct dispatch_mach_msg_s *_dmsg;
struct dispatch_source_attr_s *_dsa;
struct dispatch_semaphore_s *_dsema;
struct dispatch_data_s *_ddata;
struct dispatch_io_s *_dchannel;
struct dispatch_operation_s *_doperation;
struct dispatch_disk_s *_ddisk;
} dispatch_object_t DISPATCH_TRANSPARENT_UNION;

4. dispatch_function_t

  dispatch_function_t 只是一个函数指针

1
typedef void (*dispatch_function_t)(void *_Nullable);

  至此,一些常用的宏和数据结构体介绍完毕,接下来,我们真正的要一起阅读GCD相关的源码了。

0x03 创建队列

  首先我们先从创建队列讲起。我们已经很熟悉,创建队列的方法是调用dispatch_queue_create函数。

  • 其内部又调用了_dispatch_queue_create_with_target函数
  • DISPATCH_TARGET_QUEUE_DEFAULT这个宏其实就是null
    1
    2
    3
    4
    5
    6
    dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
    { // attr一般我们都是传DISPATCH_QUEUE_SERIAL、DISPATCH_QUEUE_CONCURRENT或者nil
    // 而DISPATCH_QUEUE_SERIAL其实就是null
    return _dispatch_queue_create_with_target(label, attr,
    DISPATCH_TARGET_QUEUE_DEFAULT, true);
    }

  _dispatch_queue_create_with_target函数,这里会获取一个root队列,并将自己新建的队列绑定到所对应的root队列上。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
static dispatch_queue_t _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
dispatch_queue_t tq, bool legacy)
{ // 根据上文代码注释里提到的,作者认为调用者传入DISPATCH_QUEUE_SERIAL和nil的几率要大于传DISPATCH_QUEUE_CONCURRENT。所以这里设置个默认值。
// 这里怎么理解呢?只要看做if(!dqa)即可
if (!slowpath(dqa)) {
// _dispatch_get_default_queue_attr里面会将dqa的dqa_autorelease_frequency指定为DISPATCH_AUTORELEASE_FREQUENCY_INHERIT的,inactive也指定为false。这里就不展开了,只需要知道赋了哪些值。因为后面会用到。
dqa = _dispatch_get_default_queue_attr();
} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
}

// 取出优先级
dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri);

// overcommit单纯从英文理解表示过量使用的意思,那这里这个overcommit就是一个标识符,表示是不是就算负荷很高了,但还是得给我新开一个线程出来给我执行任务。
_dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit;
if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
if (tq->do_targetq) {
DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
"a non-global target queue");
}
}

// 如果overcommit没有被指定
if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
// 所以对于overcommit,如果是串行的话默认是开启的,而并行是关闭的
overcommit = dqa->dqa_concurrent ?
_dispatch_queue_attr_overcommit_disabled :
_dispatch_queue_attr_overcommit_enabled;
}

// 之前说过初始化队列默认传了DISPATCH_TARGET_QUEUE_DEFAULT,也就是null,所以进入if语句。
if (!tq) {
// 获取一个管理自己队列的root队列。
tq = _dispatch_get_root_queue(
qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
overcommit == _dispatch_queue_attr_overcommit_enabled);
if (slowpath(!tq)) {
DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
}
}

// legacy默认是true的
if (legacy) {
// 之前说过,默认是会给dqa_autorelease_frequency指定为DISPATCH_AUTORELEASE_FREQUENCY_INHERIT,所以这个判断式是成立的
if (dqa->dqa_inactive || dqa->dqa_autorelease_frequency) {
legacy = false;
}
}

// vtable变量很重要,之后会被赋值到之前说的dispatch_queue_t结构体里的do_vtable变量上
const void *vtable;
dispatch_queue_flags_t dqf = 0;

// legacy变为false了
if (legacy) {
vtable = DISPATCH_VTABLE(queue);
} else if (dqa->dqa_concurrent) {
// 如果创建队列的时候传了DISPATCH_QUEUE_CONCURRENT,就是走这里
vtable = DISPATCH_VTABLE(queue_concurrent);
} else {
// 如果创建队列时没有指定为并行队列,无论你传DISPATCH_QUEUE_SERIAL还是nil,都会创建一个串行队列。
vtable = DISPATCH_VTABLE(queue_serial);
}

if (label) {
// 判断传进来的字符串是否可变的,如果可变的copy成一份不可变的
const char *tmp = _dispatch_strdup_if_mutable(label);
if (tmp != label) {
dqf |= DQF_LABEL_NEEDS_FREE;
label = tmp;
}
}

// _dispatch_object_alloc里面就将vtable赋值给do_vtable变量上了。
dispatch_queue_t dq = _dispatch_object_alloc(vtable,
sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD);
// 第三个参数根据是否并行队列,如果不是则最多开一个线程,如果是则最多开0x1000 - 2个线程,这个数量已经很惊人了,换成十进制就是(4096 - 2)个。
// dqa_inactive之前说串行是false的
// DISPATCH_QUEUE_ROLE_INNER 也是0,所以这里串行队列的话dqa->dqa_state是0
_dispatch_queue_init(dq, dqf, dqa->dqa_concurrent ?
DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
(dqa->dqa_inactive ? DISPATCH_QUEUE_INACTIVE : 0));

dq->dq_label = label;
#if HAVE_PTHREAD_WORKQUEUE_QOS
dq->dq_priority = dqa->dqa_qos_and_relpri;
if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
}
#endif
_dispatch_retain(tq);
if (qos == QOS_CLASS_UNSPECIFIED) {
_dispatch_queue_priority_inherit_from_target(dq, tq);
}
if (!dqa->dqa_inactive) {
_dispatch_queue_inherit_wlh_from_target(dq, tq);
}
// 自定义的queue的目标队列是root队列
dq->do_targetq = tq;
_dispatch_object_debug(dq, "%s", __func__);
return _dispatch_introspection_queue_create(dq);
}

  这个函数里面还是有几个重要的地方拆出来看下,首先是获取root队列的_dispatch_get_root_queue函数。取root队列,一般是从一个装有12个root队列的数组里面取。

1
2
3
4
5
6
7
8
static inline dispatch_queue_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) {
DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
}
return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}

  看下这个_dispatch_root_queues数组。我们可以看到,每一个优先级都有对应的root队列,并且每一个优先级又分为可过载(overcommit)和不可过载两种队列。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
struct dispatch_queue_s _dispatch_root_queues[] = {
#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
[_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \
.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
.do_ctxt = &_dispatch_root_queue_contexts[ \
_DISPATCH_ROOT_QUEUE_IDX(n, flags)], \
.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
.dq_priority = _dispatch_priority_make(DISPATCH_QOS_##n, 0) | flags | \
DISPATCH_PRIORITY_FLAG_ROOTQUEUE | \
((flags & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) ? 0 : \
DISPATCH_QOS_##n << DISPATCH_PRIORITY_OVERRIDE_SHIFT), \
__VA_ARGS__ \
}
_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,
.dq_label = "com.apple.root.maintenance-qos",
.dq_serialnum = 4,
),
_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.maintenance-qos.overcommit",
.dq_serialnum = 5,
),
_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0,
.dq_label = "com.apple.root.background-qos",
.dq_serialnum = 6,
),
_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.background-qos.overcommit",
.dq_serialnum = 7,
),
_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0,
.dq_label = "com.apple.root.utility-qos",
.dq_serialnum = 8,
),
_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.utility-qos.overcommit",
.dq_serialnum = 9,
),
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE,
.dq_label = "com.apple.root.default-qos",
.dq_serialnum = 10,
),
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.default-qos.overcommit",
.dq_serialnum = 11,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0,
.dq_label = "com.apple.root.user-initiated-qos",
.dq_serialnum = 12,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.user-initiated-qos.overcommit",
.dq_serialnum = 13,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0,
.dq_label = "com.apple.root.user-interactive-qos",
.dq_serialnum = 14,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.user-interactive-qos.overcommit",
.dq_serialnum = 15,
),
};

  其中DISPATCH_GLOBAL_OBJECT_HEADER(queue_root),解析到最后是OSdispatch##name##_class这样的,对应的实例对象是如下代码,指定了root队列各个操作对应的函数。

1
2
3
4
5
6
7
8
9
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue,
.do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
.do_kind = "global-queue",
.do_dispose = _dispatch_pthread_root_queue_dispose,
.do_push = _dispatch_root_queue_push,
.do_invoke = NULL,
.do_wakeup = _dispatch_root_queue_wakeup,
.do_debug = dispatch_queue_debug,
);

  其次看下DISPATCH_VTABLE这个宏,这个宏很重要。最后解封也是&OSdispatch##name##_class这样的。其实就是取dispatch_object_t对象。
  如下代码,这里再举个VTABLE的串行对象,里面有各个状态该执行的函数:销毁函、挂起、恢复、push等函数都是在这里指定的。所以这里的do_push我们需要特别留意,后面push block任务到队列,就是通过调用do_push。

1
2
3
4
5
6
7
8
9
10
11
12
13
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue,
.do_type = DISPATCH_QUEUE_SERIAL_TYPE,
.do_kind = "serial-queue",
.do_dispose = _dispatch_queue_dispose,
.do_suspend = _dispatch_queue_suspend,
.do_resume = _dispatch_queue_resume,
.do_finalize_activation = _dispatch_queue_finalize_activation,
.do_push = _dispatch_queue_push,
.do_invoke = _dispatch_queue_invoke,
.do_wakeup = _dispatch_queue_wakeup,
.do_debug = dispatch_queue_debug,
.do_set_targetq = _dispatch_queue_set_target_queue,
);

  继续看下_dispatch_object_alloc和_dispatch_queue_init两个函数,首先看下_dispatch_object_alloc函数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
void * _dispatch_object_alloc(const void *vtable, size_t size)
{
// OS_OBJECT_HAVE_OBJC1为1的满足式是:
// #if TARGET_OS_MAC && !TARGET_OS_SIMULATOR && defined(__i386__)
// 所以对于iOS并不满足
#if OS_OBJECT_HAVE_OBJC1
const struct dispatch_object_vtable_s *_vtable = vtable;
dispatch_object_t dou;
dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size);
dou._do->do_vtable = vtable;
return dou._do;
#else
return _os_object_alloc_realized(vtable, size);
#endif
}

inline _os_object_t _os_object_alloc_realized(const void *cls, size_t size)
{
_os_object_t obj;
dispatch_assert(size >= sizeof(struct _os_object_s));
while (!fastpath(obj = calloc(1u, size))) {
_dispatch_temporary_resource_shortage();
}
obj->os_obj_isa = cls;
return obj;
}

void _dispatch_temporary_resource_shortage(void)
{
sleep(1);
asm(""); // prevent tailcall
}

  再看下_dispatch_queue_init函数,这里也就是做些初始化工作了

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
static inline void _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf,
uint16_t width, uint64_t initial_state_bits)
{
uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);

dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
DISPATCH_QUEUE_INACTIVE)) == 0);

if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
dq_state |= DLOCK_OWNER_MASK;
dq->do_ref_cnt += 2;
}

dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK);
// 指向DISPATCH_OBJECT_LISTLESS是优化编译器的作用。只是为了生成更好的指令让CPU更好的编码
dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
dqf |= DQF_WIDTH(width);
// dqf 保存进 dq->dq_atomic_flags
os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
dq->dq_state = dq_state;
dq->dq_serialnum =
os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
}

  最后是_dispatch_introspection_queue_create函数,一个内省函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq)
{
TAILQ_INIT(&dq->diq_order_top_head);
TAILQ_INIT(&dq->diq_order_bottom_head);
_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list);
_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);

DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq);
if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
_dispatch_introspection_queue_create_hook(dq);
}
return dq;
}

  至此,一个队列的创建过程我们大致了解了。大致可以分为这么几点

  • 设置队列优先级
  • 默认创建的是一个串行队列
  • 设置队列挂载的根队列。优先级不同根队列也不同
  • 实例化vtable对象,这个对象给不同队列指定了push、wakeup等函数。

0x04 dispatch_sync

  dispatch_sync直接调用的是dispatch_sync_f

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
void dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
// 很大可能不会走if分支,看做if(_dispatch_block_has_private_data(work))
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_sync_block_with_private_data(dq, work, 0);
}
dispatch_sync_f(dq, work, _dispatch_Block_invoke(work));
}

void
dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
// 串行队列会走到这个if分支
if (likely(dq->dq_width == 1)) {
return dispatch_barrier_sync_f(dq, ctxt, func);
}

// 全局获取的并行队列或者绑定的是非调度线程的队列会走进这个if分支
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) {
return _dispatch_sync_f_slow(dq, ctxt, func, 0);
}

_dispatch_introspection_sync_begin(dq);
if (unlikely(dq->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dq, ctxt, func, 0);
}
// 自定义并行队列会来到这个函数
_dispatch_sync_invoke_and_complete(dq, ctxt, func);
}

  先说第一种情况,串行队列。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
void dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
dispatch_tid tid = _dispatch_tid_self();

// 队列绑定的是非调度线程就会走这里
if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) {
return _dispatch_sync_f_slow(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT);
}

_dispatch_introspection_sync_begin(dq);
if (unlikely(dq->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT);
}
// 一般会走到这里
_dispatch_queue_barrier_sync_invoke_and_complete(dq, ctxt, func);
}

static void _dispatch_queue_barrier_sync_invoke_and_complete(dispatch_queue_t dq,
void *ctxt, dispatch_function_t func)
{
// 首先会执行这个函数
_dispatch_sync_function_invoke_inline(dq, ctxt, func);
// 如果后面还有别的任务
if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
// 内部其实就是唤醒队列
return _dispatch_queue_barrier_complete(dq, 0, 0);
}

const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
uint64_t old_state, new_state;

// 原子锁。检查dq->dq_state与old_state是否相等,如果相等把new_state赋值给dq->dq_state,如果不相等,把dq_state赋值给old_state。
// 串行队列走到这里,dq->dq_state与old_state是相等的,会把new_state也就是闭包里的赋值的值给dq->dq_state
os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
if (unlikely(old_state & fail_unlock_mask)) {
os_atomic_rmw_loop_give_up({
return _dispatch_queue_barrier_complete(dq, 0, 0);
});
}
});
if (_dq_state_is_base_wlh(old_state)) {
_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
}
}

static inline void _dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
// 保护现场 -> 调用函数 -> 恢复现场
dispatch_thread_frame_s dtf;
_dispatch_thread_frame_push(&dtf, dq);
_dispatch_client_callout(ctxt, func);
_dispatch_perfmon_workitem_inc();
_dispatch_thread_frame_pop(&dtf);
}

  然后另一种情况,自定义并行队列会走_dispatch_sync_invoke_and_complete函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
static void _dispatch_sync_invoke_and_complete(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
_dispatch_sync_function_invoke_inline(dq, ctxt, func);
// 将自定义队列加入到root队列里去
// dispatch_async也会调用此方法,之前我们初始化的时候会绑定一个root队列,这里就将我们新建的队列交给root队列进行管理
_dispatch_queue_non_barrier_complete(dq);
}

static inline void _dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
dispatch_thread_frame_s dtf;
_dispatch_thread_frame_push(&dtf, dq);
// 执行任务
_dispatch_client_callout(ctxt, func);
_dispatch_perfmon_workitem_inc();
_dispatch_thread_frame_pop(&dtf);
}

0x05 dispatch_async

  内部就是两个函数_dispatch_continuation_init和_dispatch_continuation_async

1
2
3
4
5
6
7
8
9
void dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
// 设置标识位
uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;

_dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags);
_dispatch_continuation_async(dq, dc);
}

  _dispatch_continuation_init函数只是一个初始化,主要就是保存Block上下文,指定block的执行函数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
static inline void _dispatch_continuation_init(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_t work,
pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags)
{
dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT;
// block对象赋值到dc_ctxt
dc->dc_ctxt = _dispatch_Block_copy(work);
// 设置默认任务优先级
_dispatch_continuation_priority_set(dc, pp, flags);

// 大多数情况不会走这个分支
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_continuation_init_slow(dc, dqu, flags);
}

// 这个标识位多眼熟,就是前面入口赋值的,没的跑了,指定执行函数就是_dispatch_call_block_and_release了
if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
dc->dc_func = _dispatch_call_block_and_release;
} else {
dc->dc_func = _dispatch_Block_invoke(work);
}
_dispatch_continuation_voucher_set(dc, dqu, flags);
}

  _dispatch_call_block_and_release这个函数就是直接执行block,所以dc->dc_func被调用的话,block就会被直接执行。

1
2
3
4
5
6
void _dispatch_call_block_and_release(void *block)
{
void (^b)(void) = block;
b();
Block_release(b);
}

  上面的初始化过程就是这样,接着看下_dispatch_continuation_async函数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
void _dispatch_continuation_async(dispatch_queue_t dq, dispatch_continuation_t dc)
{
// 看看是不是barrier类型的block
_dispatch_continuation_async2(dq, dc,
dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT);
}

static inline void _dispatch_continuation_async2(dispatch_queue_t dq, dispatch_continuation_t dc,
bool barrier)
{
// 如果是用barrier插进来的任务或者是串行队列,直接将任务加入到队列
// #define DISPATCH_QUEUE_USES_REDIRECTION(width) \
// ({ uint16_t _width = (width); \
// _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
if (fastpath(barrier || !DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) {
return _dispatch_continuation_push(dq, dc);
}
return _dispatch_async_f2(dq, dc);
}

// 可以先看下如果是barrier任务,直接调用_dispatch_continuation_push函数
static void _dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc)
{
dx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc));
}

// _dispatch_continuation_async2函数里面调用_dispatch_async_f2函数
static void
_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc)
{
// 如果还有任务,slowpath表示很大可能队尾是没有任务的。
// 实际开发中也的确如此,一般情况下我们不会dispatch_async之后又马上跟着一个dispatch_async
if (slowpath(dq->dq_items_tail)) {
return _dispatch_continuation_push(dq, dc);
}

if (slowpath(!_dispatch_queue_try_acquire_async(dq))) {
return _dispatch_continuation_push(dq, dc);
}

// 一般会直接来到这里,_dispatch_continuation_override_qos函数里面主要做的是判断dq有没有设置的优先级,如果没有就用block对象的优先级,如果有就用自己的
return _dispatch_async_f_redirect(dq, dc,
_dispatch_continuation_override_qos(dq, dc));
}

static void _dispatch_async_f_redirect(dispatch_queue_t dq,
dispatch_object_t dou, dispatch_qos_t qos)
{
// 这里会走进if的语句,因为_dispatch_object_is_redirection内部的dx_type(dou._do) == type条件为否
if (!slowpath(_dispatch_object_is_redirection(dou))) {
dou._dc = _dispatch_async_redirect_wrap(dq, dou);
}
// dq换成所绑定的root队列
dq = dq->do_targetq;

// 基本不会走里面的循环,主要做的就是找到根root队列
while (slowpath(DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) {
if (!fastpath(_dispatch_queue_try_acquire_async(dq))) {
break;
}
if (!dou._dc->dc_ctxt) {
dou._dc->dc_ctxt = (void *)
(uintptr_t)_dispatch_queue_autorelease_frequency(dq);
}
dq = dq->do_targetq;
}

// 把装有block信息的结构体装进所在队列对应的root_queue里面
dx_push(dq, dou, qos);
}

// dx_push是个宏定义,这里做的就是将任务push到任务队列,我们看到这里,就知道dx_push就是调用对象的do_push。
#define dx_push(x, y, z) dx_vtable(x)->do_push(x, y, z)
#define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable)

  _dispatch_async_f_redirect函数里先看这句dou._dc = _dispatch_async_redirect_wrap(dq, dou);

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
static inline dispatch_continuation_t _dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();

dou._do->do_next = NULL;
// So a work item pushed by dispatch_async has a non-NULL do_vtable.
dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT);
dc->dc_func = NULL;
dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq);
// The queue the work belongs to is stashed in dc_data.
dc->dc_data = dq;
dc->dc_other = dou._do;
dc->dc_voucher = DISPATCH_NO_VOUCHER;
dc->dc_priority = DISPATCH_NO_PRIORITY;
_dispatch_retain(dq); // released in _dispatch_async_redirect_invoke
return dc;
}

// dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); 就是下面指定redirect的invoke函数是_dispatch_async_redirect_invoke,后面任务被执行就是通过这个函数
// dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); selects the first entry below,
// i.e. the redirect continuation's invoke function is
// _dispatch_async_redirect_invoke — that is what later runs the queued task.
const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = {
DC_VTABLE_ENTRY(ASYNC_REDIRECT,
.do_kind = "dc-redirect",
.do_invoke = _dispatch_async_redirect_invoke),
#if HAVE_MACH
DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN,
.do_kind = "dc-mach-send-drain",
.do_invoke = _dispatch_mach_send_barrier_drain_invoke),
DC_VTABLE_ENTRY(MACH_SEND_BARRIER,
.do_kind = "dc-mach-send-barrier",
.do_invoke = _dispatch_mach_barrier_invoke),
DC_VTABLE_ENTRY(MACH_RECV_BARRIER,
.do_kind = "dc-mach-recv-barrier",
.do_invoke = _dispatch_mach_barrier_invoke),
DC_VTABLE_ENTRY(MACH_ASYNC_REPLY,
.do_kind = "dc-mach-async-reply",
.do_invoke = _dispatch_mach_msg_async_reply_invoke),
#endif
#if HAVE_PTHREAD_WORKQUEUE_QOS
DC_VTABLE_ENTRY(OVERRIDE_STEALING,
.do_kind = "dc-override-stealing",
.do_invoke = _dispatch_queue_override_invoke),
// Note this entry — it is used again later (global-queue pushes).
DC_VTABLE_ENTRY(OVERRIDE_OWNING,
.do_kind = "dc-override-owning",
.do_invoke = _dispatch_queue_override_invoke),
#endif
};

  再看dx_push(dq, dou, qos);这句,其实就是调用_dispatch_root_queue_push函数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
void _dispatch_root_queue_push(dispatch_queue_t rq, dispatch_object_t dou,
dispatch_qos_t qos)
{
// Usually taken, for custom and global queues alike (e.g.
// dispatch_get_global_queue): compares qos against the root queue's qos;
// they generally differ, in which case we take the override path.
if (_dispatch_root_queue_push_needs_override(rq, qos)) {
return _dispatch_root_queue_push_override(rq, dou, qos);
}
_dispatch_root_queue_push_inline(rq, dou, dou, 1);
}

static void _dispatch_root_queue_push_override(dispatch_queue_t orig_rq,
dispatch_object_t dou, dispatch_qos_t qos)
{
bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit);
dispatch_continuation_t dc = dou._dc;
// _dispatch_object_is_redirection is effectively:
// return _dispatch_object_has_type(dou, DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT));
// so custom queues take the if branch; dispatch_get_global_queue does not.
if (_dispatch_object_is_redirection(dc)) {
dc->dc_func = (void *)orig_rq;
} else {
// dispatch_get_global_queue comes here.
dc = _dispatch_continuation_alloc();
// Equivalent to the entry below — the invoke function becomes
// _dispatch_queue_override_invoke, unlike the custom-queue case:
// DC_VTABLE_ENTRY(OVERRIDE_OWNING,
// .do_kind = "dc-override-owning",
// .do_invoke = _dispatch_queue_override_invoke),
dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING);
_dispatch_trace_continuation_push(orig_rq, dou);
dc->dc_ctxt = dc;
dc->dc_other = orig_rq;
dc->dc_data = dou._do;
dc->dc_priority = DISPATCH_NO_PRIORITY;
dc->dc_voucher = DISPATCH_NO_VOUCHER;
}
_dispatch_root_queue_push_inline(rq, dc, dc, 1);
}

static inline void _dispatch_root_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _head,
dispatch_object_t _tail, int n)
{
struct dispatch_object_s *head = _head._do, *tail = _tail._do;
// Enqueue the work item. The if branch is mostly NOT taken: the very first
// item does satisfy the condition and enters it to poke (activate) the
// queue so items get executed; once the queue is active, later items need
// not re-activate it — hence the "unlikely" annotation.
if (unlikely(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
// Record the queue head.
_dispatch_queue_push_update_head(dq, head);
return _dispatch_global_queue_poke(dq, n, 0);
}
}

  至此,我们可以看到,我们装入到自定义队列的任务都被扔到其挂靠的root队列里去了,所以我们自己创建的队列只是一个代理人身份,真正的管理人是其对应的root队列,但同时这个队列也是被管理的。
  继续看_dispatch_global_queue_poke函数

1
2
3
4
5
// Thin wrapper: pokes a (root) global queue by delegating to the slow path.
void
_dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor)
{
return _dispatch_global_queue_poke_slow(dq, n, floor);
}

  继续看_dispatch_global_queue_poke函数调用了_dispatch_global_queue_poke_slow函数,这里也很关键了,里面执行_pthread_workqueue_addthreads函数,把任务交给内核分发处理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
_dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor)
{
dispatch_root_queue_context_t qc = dq->do_ctxt;
int remaining = n;
int r = ENOSYS;

_dispatch_root_queues_init();
_dispatch_debug_root_queue(dq, __func__);
if (qc->dgq_kworkqueue != (void*)(~0ul))
{
// Hand the request to the kernel workqueue: ask it to bring up
// worker threads to drain this root queue.
r = _pthread_workqueue_addthreads(remaining,
_dispatch_priority_to_pp(dq->dq_priority));
(void)dispatch_assume_zero(r);
return;
}
}

// Requests numthreads additional workqueue threads from the kernel at the
// given priority, via the __workq_kernreturn syscall.
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
int res = 0;

// No dispatch worker function registered yet: refuse.
if (__libdispatch_workerfunction == NULL) {
return EPERM;
}

if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
return ENOTSUP;
}

res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
if (res == -1) {
res = errno;
}
return res;
}

  那么,加入到根队列的任务是怎么被运行起来的?在此之前,我们先模拟一下在GCD内部把程序搞挂掉,这样我们就可以追溯下调用栈关系。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
(
0 CoreFoundation 0x00000001093fe12b __exceptionPreprocess + 171
1 libobjc.A.dylib 0x0000000108a92f41 objc_exception_throw + 48
2 CoreFoundation 0x000000010943e0cc _CFThrowFormattedException + 194
3 CoreFoundation 0x000000010930c23d -[__NSPlaceholderArray initWithObjects:count:] + 237
4 CoreFoundation 0x0000000109312e34 +[NSArray arrayWithObjects:count:] + 52
5 HotPatch 0x000000010769df77 __29-[ViewController viewDidLoad]_block_invoke + 87
6 libdispatch.dylib 0x000000010c0a62f7 _dispatch_call_block_and_release + 12
7 libdispatch.dylib 0x000000010c0a733d _dispatch_client_callout + 8
8 libdispatch.dylib 0x000000010c0ad754 _dispatch_continuation_pop + 967
9 libdispatch.dylib 0x000000010c0abb85 _dispatch_async_redirect_invoke + 780
10 libdispatch.dylib 0x000000010c0b3102 _dispatch_root_queue_drain + 772
11 libdispatch.dylib 0x000000010c0b2da0 _dispatch_worker_thread3 + 132
12 libsystem_pthread.dylib 0x000000010c5f95a2 _pthread_wqthread + 1299
13 libsystem_pthread.dylib 0x000000010c5f907d
start_wqthread + 13
)

  很明显,我们已经看到加入到队列的任务的调用关系是:
start_wqthread -> _pthread_wqthread -> _dispatch_worker_thread3 -> _dispatch_root_queue_drain -> _dispatch_async_redirect_invoke -> _dispatch_continuation_pop -> _dispatch_client_callout -> _dispatch_call_block_and_release
  只看调用关系也不知道里面做了什么,所以还是上代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
// Looks up the root queue matching the thread's priority, then calls
// _dispatch_worker_thread4.
static void _dispatch_worker_thread3(pthread_priority_t pp)
{
bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
dispatch_queue_t dq;
pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
_dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit);
return _dispatch_worker_thread4(dq);
}
// Kicks off _dispatch_root_queue_drain to start pulling work items.
static void _dispatch_worker_thread4(void *context)
{
dispatch_queue_t dq = context;
dispatch_root_queue_context_t qc = dq->do_ctxt;

_dispatch_introspection_thread_add();
int pending = os_atomic_dec2o(qc, dgq_pending, relaxed);
dispatch_assert(pending >= 0);
_dispatch_root_queue_drain(dq, _dispatch_get_priority());
_dispatch_voucher_debug("root queue clear", NULL);
_dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK);
}
// Loops pulling work items off the root queue and popping each one.
static void _dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp)
{
_dispatch_queue_set_current(dq);
dispatch_priority_t pri = dq->dq_priority;
if (!pri) pri = _dispatch_priority_from_pp(pp);
dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
_dispatch_adopt_wlh_anon();

struct dispatch_object_s *item;
bool reset = false;
dispatch_invoke_context_s dic = { };
dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN |
DISPATCH_INVOKE_REDIRECTING_DRAIN;
_dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri);
_dispatch_perfmon_start();
// Drain items one at a time until the queue is empty or we should narrow.
while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) {
if (reset) _dispatch_wqthread_override_reset();
_dispatch_continuation_pop_inline(item, &dic, flags, dq);
reset = _dispatch_reset_basepri_override();
if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) {
break;
}
}

// overcommit or not. worker thread
if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
_dispatch_perfmon_end(perfmon_thread_worker_oc);
} else {
_dispatch_perfmon_end(perfmon_thread_worker_non_oc);
}

_dispatch_reset_wlh();
_dispatch_reset_basepri(old_dbp);
_dispatch_reset_basepri_override();
_dispatch_queue_set_current(NULL);
}

// Dispatches to the work item's execution function.
static inline void _dispatch_continuation_pop_inline(dispatch_object_t dou,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
dispatch_queue_t dq)
{
dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
_dispatch_get_pthread_root_queue_observer_hooks();
if (observer_hooks) observer_hooks->queue_will_execute(dq);
_dispatch_trace_continuation_pop(dq, dou);
flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;

// As noted earlier, dispatch_async items have do_vtable set, so this if
// branch is taken and the invoke function is _dispatch_async_redirect_invoke.
// Likewise, for dispatch_get_global_queue this branch runs
// _dispatch_queue_override_invoke, as described above.
if (_dispatch_object_has_vtable(dou)) {
dx_invoke(dou._do, dic, flags);
} else {
_dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
}
if (observer_hooks) observer_hooks->queue_did_execute(dq);
}

// Continuing with the custom-queue path.
void _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
{
dispatch_thread_frame_s dtf;
struct dispatch_continuation_s *other_dc = dc->dc_other;
dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt;

dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func;
dispatch_queue_t dq = dc->dc_data, rq, old_dq;
dispatch_priority_t old_dbp;

if (ctxt_flags) {
flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
flags |= ctxt_flags;
}
old_dq = _dispatch_get_current_queue();
if (assumed_rq) {
old_dbp = _dispatch_root_queue_identity_assume(assumed_rq);
_dispatch_set_basepri(dq->dq_priority);
} else {
old_dbp = _dispatch_set_basepri(dq->dq_priority);
}

_dispatch_thread_frame_push(&dtf, dq);
// _dispatch_continuation_pop_forwarded boils down to calling
// _dispatch_continuation_pop on the wrapped item.
_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER,
DISPATCH_OBJ_CONSUME_BIT, {
_dispatch_continuation_pop(other_dc, dic, flags, dq);
});
_dispatch_thread_frame_pop(&dtf);
if (assumed_rq) _dispatch_queue_set_current(old_dq);
_dispatch_reset_basepri(old_dbp);

rq = dq->do_targetq;
while (slowpath(rq->do_targetq) && rq != old_dq) {
_dispatch_queue_non_barrier_complete(rq);
rq = rq->do_targetq;
}

_dispatch_queue_non_barrier_complete(dq);
_dispatch_release_tailcall(dq);
}

// For reference: work submitted via dispatch_get_global_queue executes
// through _dispatch_queue_override_invoke instead.
void _dispatch_queue_override_invoke(dispatch_continuation_t dc,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
{
dispatch_queue_t old_rq = _dispatch_queue_get_current();
dispatch_queue_t assumed_rq = dc->dc_other;
dispatch_priority_t old_dp;
voucher_t ov = DISPATCH_NO_VOUCHER;
dispatch_object_t dou;

dou._do = dc->dc_data;
old_dp = _dispatch_root_queue_identity_assume(assumed_rq);
if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) {
flags |= DISPATCH_INVOKE_STEALING;
} else {
// balance the fake continuation push in
// _dispatch_root_queue_push_override
_dispatch_trace_continuation_pop(assumed_rq, dou._do);
}
// Likewise ends up in _dispatch_continuation_pop.
_dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, {
if (_dispatch_object_has_vtable(dou._do)) {
dx_invoke(dou._do, dic, flags);
} else {
_dispatch_continuation_invoke_inline(dou, ov, flags);
}
});
_dispatch_reset_basepri(old_dp);
_dispatch_queue_set_current(old_rq);
}

// Back on track: custom queue or global queue, both eventually call this.
void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic,
dispatch_invoke_flags_t flags, dispatch_queue_t dq)
{
_dispatch_continuation_pop_inline(dou, dic, flags, dq);
}

// (Repeated for context.) Routes the item either through its vtable invoke
// function or straight into _dispatch_continuation_invoke_inline.
static inline void _dispatch_continuation_pop_inline(dispatch_object_t dou,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
dispatch_queue_t dq)
{
dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
_dispatch_get_pthread_root_queue_observer_hooks();
if (observer_hooks) observer_hooks->queue_will_execute(dq);
_dispatch_trace_continuation_pop(dq, dou);
flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
if (_dispatch_object_has_vtable(dou)) {
dx_invoke(dou._do, dic, flags);
} else {
_dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
}
if (observer_hooks) observer_hooks->queue_did_execute(dq);
}

static inline void _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
dispatch_invoke_flags_t flags)
{
dispatch_continuation_t dc = dou._dc, dc1;
dispatch_invoke_with_autoreleasepool(flags, {
uintptr_t dc_flags = dc->dc_flags;

_dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
dc1 = _dispatch_continuation_free_cacheonly(dc);
} else {
dc1 = NULL;
}
// dispatch_group_async (analyzed later) takes the if branch;
// this time we take the else branch.
if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
_dispatch_continuation_with_group_invoke(dc);
} else {
// This time: run the block's function directly.
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
_dispatch_introspection_queue_item_complete(dou);
}
if (unlikely(dc1)) {
_dispatch_continuation_free_to_cache_limit(dc1);
}
});
_dispatch_perfmon_workitem_inc();
}

  至此,任务怎么被调度执行的已经看明白了。start_wqthread是汇编写的,直接和内核交互。虽然我们明确了使用了异步的任务被执行的调用顺序,但是想必还是有这样的疑问_dispatch_worker_thread3是怎么跟内核扯上关系的。为什么调用的是_dispatch_worker_thread3,而不是_dispatch_worker_thread或者_dispatch_worker_thread4呢?
  在此之前需要说的是,在GCD中一共有2个线程池管理着任务,一个是主线程池,另一个就是除了主线程任务的线程池。主线程池由序号1的队列管理,其他由序号2的队列进行管理。加上runloop运行的runloop队列,一共就有16个队列。

序号 标签
1 com.apple.main-thread
2 com.apple.libdispatch-manager
3 com.apple.root.libdispatch-manager
4 com.apple.root.maintenance-qos
5 com.apple.root.maintenance-qos.overcommit
6 com.apple.root.background-qos
7 com.apple.root.background-qos.overcommit
8 com.apple.root.utility-qos
9 com.apple.root.utility-qos.overcommit
10 com.apple.root.default-qos
11 com.apple.root.default-qos.overcommit
12 com.apple.root.user-initiated-qos
13 com.apple.root.user-initiated-qos.overcommit
14 com.apple.root.user-interactive-qos
15 com.apple.root.user-interactive-qos.overcommit

  看图的话,就如下图线程池图
  有那么多root队列,所以application启动的时候就会初始化这些root队列的_dispatch_root_queues_init函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
// One-time initialization of all root queues (run once per process).
void
_dispatch_root_queues_init(void)
{
static dispatch_once_t _dispatch_root_queues_pred;
dispatch_once_f(&_dispatch_root_queues_pred, NULL,
_dispatch_root_queues_init_once);
}

static void
_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
{
int wq_supported;
_dispatch_fork_becomes_unsafe();
// Try kernel workqueues first; fall back to an internal pthread pool,
// and crash if even that cannot be set up.
if (!_dispatch_root_queues_init_workq(&wq_supported)) {
size_t i;
for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
bool overcommit = true;
_dispatch_root_queue_init_pthread_pool(
&_dispatch_root_queue_contexts[i], 0, overcommit);
}
DISPATCH_INTERNAL_CRASH((errno << 16) | wq_supported,
"Root queue initialization failed");
}
}

// Registers _dispatch_worker_thread3 with the kernel workqueue layer (kevent
// variant); returns true on success.
static inline bool
_dispatch_root_queues_init_workq(int *wq_supported)
{
int r; (void)r;
bool result = false;
*wq_supported = 0;
bool disable_wq = false; (void)disable_wq;
bool disable_qos = false;
bool disable_kevent_wq = false;
if (!disable_wq && !disable_qos) {
*wq_supported = _pthread_workqueue_supported();
if (!disable_kevent_wq && (*wq_supported & WORKQ_FEATURE_KEVENT)) {
r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread3,
(pthread_workqueue_function_kevent_t)
_dispatch_kevent_worker_thread,
offsetof(struct dispatch_queue_s, dq_serialnum), 0);
result = !r;
}
}
return result;
}

  来到这里,已经看到_pthread_workqueue_init_with_kevent函数就是绑定了_dispatch_worker_thread3函数去做一些GCD的线程任务,看到源代码_pthread_workqueue_init_with_kevent做了些什么。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
// libpthread side: registers libdispatch's worker functions with the kernel
// workqueue mechanism.
int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
pthread_workqueue_function_kevent_t kevent_func,
int offset, int flags)
{
return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
}

int
_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
pthread_workqueue_function_kevent_t kevent_func,
pthread_workqueue_function_workloop_t workloop_func,
int offset, int flags)
{
if (flags != 0) {
return ENOTSUP;
}

__workq_newapi = true;
__libdispatch_offset = offset;

int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
return rv;
}

static int
pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
pthread_workqueue_function_kevent_t kevent_func,
pthread_workqueue_function_workloop_t workloop_func)
{
int res = EBUSY;
if (__libdispatch_workerfunction == NULL) {
// Check whether the kernel supports new SPIs
res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
if (res == -1){
res = ENOTSUP;
} else {
// Record the worker functions; __libdispatch_workerfunction is what
// _pthread_wqthread later invokes (i.e. _dispatch_worker_thread3).
__libdispatch_workerfunction = queue_func;
__libdispatch_keventfunction = kevent_func;
__libdispatch_workloopfunction = workloop_func;

// Prepare the kernel for workq action
(void)__workq_open();
if (__is_threaded == 0) {
__is_threaded = 1;
}
}
}
return res;
}

  我们看到了__libdispatch_workerfunction = queue_func;指定了队列工作函数。然后我们往回看之前说的我们制造了一个人为crash,追溯栈里看到_pthread_wqthread这个函数。看下这个函数怎么启用_dispatch_worker_thread3的

1
2
3
4
5
6
7
8
9
10
11
// Heavily abridged by the author: fetches _dispatch_worker_thread3 from
// __libdispatch_workerfunction and invokes it directly.
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
{
pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
// Invoke the registered worker function.
(*func)(thread_class, options, NULL);
__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
_pthread_exit(self, NULL);
}

0x05 dispatch_group_async

  同样从入口看起

1
2
3
4
5
6
7
8
9
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_block_t db)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
// Same as dispatch_async, plus DISPATCH_OBJ_GROUP_BIT to mark group work.
uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT;

_dispatch_continuation_init(dc, dq, db, 0, 0, dc_flags);
_dispatch_continuation_group_async(dg, dq, dc);
}

  同样是_dispatch_continuation_init函数,这里跟dispatch_async那里一毛一样,忘记了的话,往回看。我们接着往下看,_dispatch_continuation_group_async函数

1
2
3
4
5
6
7
8
// Enters the group before submitting: dispatch_group_async is effectively
// dispatch_group_enter + dispatch_async (leave happens after the block runs).
static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dc)
{
dispatch_group_enter(dg);
dc->dc_data = dg;
_dispatch_continuation_async(dq, dc);
}

  我们发现,其实dispatch_group_async内部也是加了dispatch_group_enter函数。dispatch_group_async怎么初始化我们至此已经说明完毕。
  后面取出执行block逻辑跟dispatch_async略微不同,前面部分不做多说,调用顺序跟dispatch_async是一样的,唯一不同在于_dispatch_continuation_invoke_inline这个函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
static inline void _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
dispatch_invoke_flags_t flags)
{
dispatch_continuation_t dc = dou._dc, dc1;
dispatch_invoke_with_autoreleasepool(flags, {
uintptr_t dc_flags = dc->dc_flags;

_dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
dc1 = _dispatch_continuation_free_cacheonly(dc);
} else {
dc1 = NULL;
}
// This time (group work) the if branch is taken.
if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
_dispatch_continuation_with_group_invoke(dc);
} else {
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
_dispatch_introspection_queue_item_complete(dou);
}
if (unlikely(dc1)) {
_dispatch_continuation_free_to_cache_limit(dc1);
}
});
_dispatch_perfmon_workitem_inc();
}

static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
struct dispatch_object_s *dou = dc->dc_data;
unsigned long type = dx_type(dou);
if (type == DISPATCH_GROUP_TYPE) {
// Run the work item.
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
_dispatch_introspection_queue_item_complete(dou);
// Then leave the group.
dispatch_group_leave((dispatch_group_t)dou);
} else {
DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
}
}

  我们有必要看下dispatch_group_enter和dispatch_group_leave函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
// dispatch_group_enter is straightforward: increment dg->dg_value by 1.
void dispatch_group_enter(dispatch_group_t dg)
{
long value = os_atomic_inc_orig2o(dg, dg_value, acquire);
if (slowpath((unsigned long)value >= (unsigned long)LONG_MAX)) {
DISPATCH_CLIENT_CRASH(value,
"Too many nested calls to dispatch_group_enter()");
}
if (value == 0) {
_dispatch_retain(dg);
}
}

void dispatch_group_leave(dispatch_group_t dg)
{
long value = os_atomic_dec2o(dg, dg_value, release);
// Count dropped to zero: wake the group.
if (slowpath(value == 0)) {
return (void)_dispatch_group_wake(dg, true);
}
// Below zero means enter/leave calls were unbalanced — crash. So
// dispatch_group_enter and dispatch_group_leave must always be paired.
if (slowpath(value < 0)) {
DISPATCH_CLIENT_CRASH(value,
"Unbalanced call to dispatch_group_leave()");
}
}

static long _dispatch_group_wake(dispatch_group_t dg, bool needs_release)
{
dispatch_continuation_t next, head, tail = NULL;
long rval;


head = os_atomic_xchg2o(dg, dg_notify_head, NULL, relaxed);
if (head) {
tail = os_atomic_xchg2o(dg, dg_notify_tail, NULL, release);
}
// Reset dg->dg_waiters to 0 and fetch its previous value.
rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed);
// If there were waiters...
if (rval) {
// ...create the semaphore...
_dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
// ...and signal it once per waiter.
_dispatch_sema4_signal(&dg->dg_sema, rval);
}
uint16_t refs = needs_release ? 1 : 0;
// If the group has queued notifications, submit them now — e.g. a
// dispatch_group_notify block gets woken up here.
if (head) {

do {
next = os_mpsc_pop_snapshot_head(head, tail, do_next);
dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data;
_dispatch_continuation_async(dsn_queue, head);
_dispatch_release(dsn_queue);
} while ((head = next));
refs++;
}
if (refs) _dispatch_release_n(dg, refs);
return 0;
}

0x06 dispatch_once

  还是从入口函数开始看

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
// Public entry point for dispatch_once.
void dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
// Delegates to dispatch_once_f with the block's invoke function.
dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}

DISPATCH_NOINLINE
void dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) {
return dispatch_once_f_slow(val, ctxt, func);
}

DISPATCH_ONCE_SLOW_INLINE
static void
dispatch_once_f_slow(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
// _dispatch_once_waiter_t layout:
// typedef struct _dispatch_once_waiter_s {
// volatile struct _dispatch_once_waiter_s *volatile dow_next;
// dispatch_thread_event_s dow_event;
// mach_port_t dow_thread;
// } *_dispatch_once_waiter_t;


// volatile: tells the compiler not to optimize accesses through this
// pointer, since the pointed-to value may be changed by other threads.
_dispatch_once_waiter_t volatile *vval = (_dispatch_once_waiter_t*)val;
struct _dispatch_once_waiter_s dow = { };
_dispatch_once_waiter_t tail = &dow, next, tmp;
dispatch_thread_event_t event;

// First execution: *vval is 0, so comparing it with NULL succeeds and tail
// is stored into *vval. Any other thread arriving concurrently now sees a
// non-zero *vval and falls into the else branch.
if (os_atomic_cmpxchg(vval, NULL, tail, acquire)) {
// Record the current thread.
dow.dow_thread = _dispatch_tid_self();
// Run the block — typically the caller's one-time initialization.
_dispatch_client_callout(ctxt, func);

// Stores DLOCK_ONCE_DONE into val, marking completion, and returns the
// previous value. If no other thread entered meanwhile that value is
// still tail and we're done; otherwise it is the head of a waiter list.
next = (_dispatch_once_waiter_t)_dispatch_once_xchg_done(val);
// Walk the waiter list (if any) and wake each waiter's event.
while (next != tail) {
// Uses thread_switch internally to avoid priority inversion;
// returns next->dow_next.
tmp = (_dispatch_once_waiter_t)_dispatch_wait_until(next->dow_next);
event = &next->dow_event;
next = tmp;
// Wake this waiter.
_dispatch_thread_event_signal(event);
}
} else {
// Internally _dispatch_sema4_init: set up this waiter's event.
_dispatch_thread_event_init(&dow.dow_event);
// Load the current head.
next = *vval;
// Spin until done or successfully enqueued as a waiter.
for (;;) {
// On later calls, the if branch above has already marked the token
// DISPATCH_ONCE_DONE, so we exit immediately.
if (next == DISPATCH_ONCE_DONE) {
break;
}
// During the very first initialization, a concurrent thread lands
// here: the winner holds the if branch, but the token is not yet
// DISPATCH_ONCE_DONE.
// Try to swap ourselves in as the new head (vval == next on first try).
if (os_atomic_cmpxchgv(vval, next, tail, &next, release)) {
dow.dow_thread = next->dow_thread;
dow.dow_next = next;
if (dow.dow_thread) {
pthread_priority_t pp = _dispatch_get_priority();
_dispatch_thread_override_start(dow.dow_thread, pp, val);
}
// Block until woken, then clean up.
_dispatch_thread_event_wait(&dow.dow_event);
if (dow.dow_thread) {

_dispatch_thread_override_end(dow.dow_thread, val);
}
break;
}
}
// Destroy this waiter's event.
_dispatch_thread_event_destroy(&dow.dow_event);
}
}

  那么,回到上篇提到使用dispatch_once死锁的问题,如果使用不当会造成什么后果?回顾下上篇的实验代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
// Demo of a dispatch_once deadlock: -once and -otherOnce each call the other
// from inside their dispatch_once block, so each token waits on the other
// on the same thread and neither can complete.
- (void)viewDidLoad {
[super viewDidLoad];

[self once];
}

- (void)once {
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
[self otherOnce];
});
NSLog(@"遇到第一只熊猫宝宝...");
}

- (void)otherOnce {
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
[self once];
});
NSLog(@"遇到第二只熊猫宝宝...");
}

  示例中我们可以看到once方法需要等待otherOnce方法的完成,而otherOnce又调用了once,根据前面的源码,otherOnce调用once方法会走到else分支,在这个分支等待之前一个信号量发出唤醒指令,但是once方法里面又依赖otherOnce方法的完成,由于处于一个线程,所以就卡住了。

0x06 dispatch_group_create & dispatch_semaphore_create

  为什么两个一起看,其实dispatch_group也是通过dispatch_semaphore控制的,看下dispatch_group_create源代码:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
dispatch_group_t
dispatch_group_create(void)
{
return _dispatch_group_create_with_count(0);
}

static inline dispatch_group_t
_dispatch_group_create_with_count(long count)
{
dispatch_group_t dg = (dispatch_group_t)_dispatch_object_alloc(
DISPATCH_VTABLE(group), sizeof(struct dispatch_group_s));
_dispatch_semaphore_class_init(count, dg); // initialize the semaphore
if (count) {
os_atomic_store2o(dg, do_ref_cnt, 1, relaxed);
}
return dg;
}

  同样的,看下dispatch_semaphore_create源代码,是不是一股熟悉的配方:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
    dispatch_semaphore_t
dispatch_semaphore_create(long value)
{
dispatch_semaphore_t dsema;

// Negative initial values are invalid.
if (value < 0) {
return DISPATCH_BAD_INPUT;
}

dsema = (dispatch_semaphore_t)_dispatch_object_alloc(
DISPATCH_VTABLE(semaphore), sizeof(struct dispatch_semaphore_s));
_dispatch_semaphore_class_init(value, dsema); // same semaphore initialization
dsema->dsema_orig = value;
return dsema;
}

0x07 dispatch_group_wait & dispatch_semaphore_wait

  
再看下dispatch_group_wait的代码,其内部是调用的_dispatch_group_wait_slow函数:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
static long _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout)
{
long value;
int orig_waiters;

value = os_atomic_load2o(dg, dg_value, ordered);
if (value == 0) {
return _dispatch_group_wake(dg, false);
}

(void)os_atomic_inc2o(dg, dg_waiters, relaxed);

value = os_atomic_load2o(dg, dg_value, ordered);
// The group has no outstanding work.
if (value == 0) {
_dispatch_group_wake(dg, false);

timeout = DISPATCH_TIME_FOREVER;
}

_dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
switch (timeout) {
default:
if (!_dispatch_sema4_timedwait(&dg->dg_sema, timeout)) {
break;
}

case DISPATCH_TIME_NOW:
orig_waiters = dg->dg_waiters;
while (orig_waiters) {
if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters,
orig_waiters - 1, &orig_waiters, relaxed)) {
return _DSEMA4_TIMEOUT();
}
}

case DISPATCH_TIME_FOREVER:
_dispatch_sema4_wait(&dg->dg_sema);
break;
}
return 0;
}

  对比着看dispatch_semaphore_wait源码,其内部也调用_dispatch_semaphore_wait_slow函数,可以看到逻辑基本一致:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
// Same shape as _dispatch_group_wait_slow: timed wait, immediate poll, or
// wait forever on the underlying semaphore.
static long _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
dispatch_time_t timeout)
{
long orig;

_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
switch (timeout) {
default:
if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
break;
}

case DISPATCH_TIME_NOW:
orig = dsema->dsema_value;
while (orig < 0) {
if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1,
&orig, relaxed)) {
return _DSEMA4_TIMEOUT();
}
}

case DISPATCH_TIME_FOREVER:
_dispatch_sema4_wait(&dsema->dsema_sema);
break;
}
return 0;
}

0x08 dispatch_group_notify

  再把dispatch_group_notify看下

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_block_t db)
{
dispatch_continuation_t dsn = _dispatch_continuation_alloc();
// Same initialization as discussed earlier.
_dispatch_continuation_init(dsn, dq, db, 0, 0, DISPATCH_OBJ_CONSUME_BIT);
_dispatch_group_notify(dg, dq, dsn);
}

static inline void _dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dsn)
{
dsn->dc_data = dq;
dsn->do_next = NULL;
_dispatch_retain(dq);
if (os_mpsc_push_update_tail(dg, dg_notify, dsn, do_next)) {
_dispatch_retain(dg);
os_atomic_store2o(dg, dg_notify_head, dsn, ordered);
// If the group's tasks are all finished already, wake it immediately.
if (os_atomic_load2o(dg, dg_value, ordered) == 0) {
_dispatch_group_wake(dg, false);
}
}
}

  在dispatch_group_async里面我们知道dispatch_group的任务在执行后会调用dispatch_group_leave。这个函数里面如果等待者没有了,就会唤醒dispatch_group。里面的任务,比如dispatch_group_notify的任务就会这时候被执行。
  这里执行的调用顺序就不贴了,基本跟dispatch_async一致。

0x09 dispatch_barrier_async

  可以看到,大多数实现都是大同小异,通过不同的标志位来控制。这里跟dispatch_async的不同就在于,dispatch_async直接把任务扔到root队列,而dispatch_barrier_async是把任务在到自定义的队列。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
// Like dispatch_async but with DISPATCH_OBJ_BARRIER_BIT set, and the item is
// pushed onto the submitted-to queue itself rather than straight to a root queue.
void
dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT;

_dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags);
_dispatch_continuation_push(dq, dc);
}

void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
dispatch_qos_t qos)
{
_dispatch_queue_push_inline(dq, dou, qos);
}

static inline void _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
dispatch_qos_t qos)
{
struct dispatch_object_s *tail = _tail._do;
dispatch_wakeup_flags_t flags = 0;
bool overriding = _dispatch_queue_need_override_retain(dq, qos);
// Enqueue onto this queue itself (not a root queue).
if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
if (!overriding) _dispatch_retain_2(dq->_as_os_obj);
_dispatch_queue_push_update_head(dq, tail);
flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
} else if (overriding) {
flags = DISPATCH_WAKEUP_CONSUME_2;
} else {
return;
}
// Wake the queue.
return dx_wakeup(dq, qos, flags);
}

void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags)
{
dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;

if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
return _dispatch_queue_barrier_complete(dq, qos, flags);
}
// Internally just tail != NULL, so this condition holds here.
if (_dispatch_queue_class_probe(dq)) {
// #define DISPATCH_QUEUE_WAKEUP_TARGET ((dispatch_queue_wakeup_target_t)1)
target = DISPATCH_QUEUE_WAKEUP_TARGET;
}
return _dispatch_queue_class_wakeup(dq, qos, flags, target);
}

// Core wakeup: atomically merge QoS into dq_state, mark the queue enqueued
// and dirty as needed, then push the queue itself onto its target queue.
void _dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
{
dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);

// In this code path target is DISPATCH_QUEUE_WAKEUP_TARGET, so we enter.
if (target) {
uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
if (target == DISPATCH_QUEUE_WAKEUP_MGR) {
enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
}
qos = _dispatch_queue_override_qos(dq, qos);
// Atomic read-modify-write loop over dq_state.
os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
new_state = _dq_state_merge_qos(old_state, qos);
if (likely(!_dq_state_is_suspended(old_state) &&
!_dq_state_is_enqueued(old_state) &&
(!_dq_state_drain_locked(old_state) ||
(enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR &&
_dq_state_is_base_wlh(old_state))))) {
new_state |= enqueue;
}
if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
new_state |= DISPATCH_QUEUE_DIRTY;
} else if (new_state == old_state) {
os_atomic_rmw_loop_give_up(goto done);
}
});

// Only the transition into the ENQUEUED state performs the push.
if (likely((old_state ^ new_state) & enqueue)) {
dispatch_queue_t tq;
if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
os_atomic_thread_fence(dependency);
tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
(long)new_state);
} else {
tq = target;
}
dispatch_assert(_dq_state_is_enqueued(new_state));
// Enqueue the queue itself onto the root (target) queue; internally
// this calls _dispatch_root_queue_push.
return _dispatch_queue_push_queue(tq, dq, new_state);
}
}
done:
if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) {
return _dispatch_release_2_tailcall(dq);
}
}

// _dispatch_root_queue_push was already shown in the dispatch_async section;
// go straight to _dispatch_root_queue_push_override.
// Wraps the pushed object in an OVERRIDE_OWNING continuation so that, when it
// is drained from the root queue, _dispatch_queue_override_invoke runs it.
static void _dispatch_root_queue_push_override(dispatch_queue_t orig_rq,
dispatch_object_t dou, dispatch_qos_t qos)
{
bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit);
dispatch_continuation_t dc = dou._dc;
// Because the barrier item was pushed onto its own (custom) queue, this
// if branch is not taken here; see the dispatch_async section for details.
if (_dispatch_object_is_redirection(dc)) {
dc->dc_func = (void *)orig_rq;
} else {
dc = _dispatch_continuation_alloc();
// Setting do_vtable means that when this item is popped and executed,
// _dispatch_queue_override_invoke is what gets called.
dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING);
_dispatch_trace_continuation_push(orig_rq, dou);
dc->dc_ctxt = dc;
dc->dc_other = orig_rq;
dc->dc_data = dou._do;
dc->dc_priority = DISPATCH_NO_PRIORITY;
dc->dc_voucher = DISPATCH_NO_VOUCHER;
}
_dispatch_root_queue_push_inline(rq, dc, dc, 1);
}

// 后面也省略

  同样我们人为制造一个闪退,看下被调用顺序

1
2
3
4
5
6
7
8
9
6   libdispatch.dylib                   0x0000000105b952f7 _dispatch_call_block_and_release + 12
7 libdispatch.dylib 0x0000000105b9633d _dispatch_client_callout + 8
8 libdispatch.dylib 0x0000000105ba40a5 _dispatch_queue_concurrent_drain + 1492
9 libdispatch.dylib 0x0000000105b9f1fb _dispatch_queue_invoke + 353
10 libdispatch.dylib 0x0000000105b9af7c _dispatch_queue_override_invoke + 733
11 libdispatch.dylib 0x0000000105ba2102 _dispatch_root_queue_drain + 772
12 libdispatch.dylib 0x0000000105ba1da0 _dispatch_worker_thread3 + 132
13 libsystem_pthread.dylib 0x000000010605d5a2 _pthread_wqthread + 1299
14 libsystem_pthread.dylib 0x000000010605d07d start_wqthread + 13

  一样从_dispatch_root_queue_drain开始看

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
 // Worker-thread drain loop for a root queue: pops items one by one and
 // invokes them, then restores the thread's base priority and current queue.
 static void _dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp)
{
_dispatch_queue_set_current(dq);
dispatch_priority_t pri = dq->dq_priority;
if (!pri) pri = _dispatch_priority_from_pp(pp);
dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
_dispatch_adopt_wlh_anon();

struct dispatch_object_s *item;
bool reset = false;
dispatch_invoke_context_s dic = { };
dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN |
DISPATCH_INVOKE_REDIRECTING_DRAIN;
_dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri);
_dispatch_perfmon_start();
// A root queue's items can be either a dispatch_queue_t or a
// dispatch_continuation_t; in the barrier path `item` is a dispatch_queue_t.
while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) {
if (reset) _dispatch_wqthread_override_reset();
_dispatch_continuation_pop_inline(item, &dic, flags, dq);
// Reset the current thread's priority override (talks to the kernel).
reset = _dispatch_reset_basepri_override();
if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) {
break;
}
}

// overcommit or not. worker thread
if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
_dispatch_perfmon_end(perfmon_thread_worker_oc);
} else {
_dispatch_perfmon_end(perfmon_thread_worker_non_oc);
}

_dispatch_reset_wlh();
_dispatch_reset_basepri(old_dbp);
_dispatch_reset_basepri_override();
_dispatch_queue_set_current(NULL);
}

// Pop-and-invoke: runs one item drained from a queue.
static inline void _dispatch_continuation_pop_inline(dispatch_object_t dou,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
dispatch_queue_t dq)
{
dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
_dispatch_get_pthread_root_queue_observer_hooks();
if (observer_hooks) observer_hooks->queue_will_execute(dq);
_dispatch_trace_continuation_pop(dq, dou);
flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
// In the barrier path this calls _dispatch_queue_override_invoke.
// An item popped from a root queue may be either a queue or a plain work
// item; if it is a queue (has a vtable), invoke the queue's invoke function.
// This is also why the official docs say barriers are only effective on a
// custom queue: on a non-custom (global/root) queue the item goes straight
// through _dispatch_continuation_invoke_inline, i.e. dispatch_barrier_async
// degenerates into dispatch_async.
if (_dispatch_object_has_vtable(dou)) {
dx_invoke(dou._do, dic, flags);
} else {
_dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
}
if (observer_hooks) observer_hooks->queue_did_execute(dq);
}

// Invoke function for OVERRIDE_* continuations: temporarily assumes the
// identity of the root queue the item was enqueued on, runs the wrapped
// object (here: the custom queue), then restores the previous identity.
void _dispatch_queue_override_invoke(dispatch_continuation_t dc,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
{
dispatch_queue_t old_rq = _dispatch_queue_get_current();
dispatch_queue_t assumed_rq = dc->dc_other;
dispatch_priority_t old_dp;
voucher_t ov = DISPATCH_NO_VOUCHER;
dispatch_object_t dou;

dou._do = dc->dc_data;
// Activate the custom queue and park the root queue's identity, saving it
// in old_dp/old_rq. This is why barrier work can run ahead while the items
// queued behind the barrier stay blocked.
// static inline dispatch_priority_t
//_dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq)
//{
// dispatch_priority_t old_dbp = _dispatch_get_basepri();
// dispatch_assert(dx_hastypeflag(assumed_rq, QUEUE_ROOT));
// _dispatch_reset_basepri(assumed_rq->dq_priority);
// _dispatch_queue_set_current(assumed_rq);
// return old_dbp;
//}
old_dp = _dispatch_root_queue_identity_assume(assumed_rq);
if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) {
flags |= DISPATCH_INVOKE_STEALING;
} else {
_dispatch_trace_continuation_pop(assumed_rq, dou._do);
}
_dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, {
// Takes the if branch here: the payload is a queue, so this calls
// _dispatch_queue_invoke.
if (_dispatch_object_has_vtable(dou._do)) {
dx_invoke(dou._do, dic, flags);
} else {
_dispatch_continuation_invoke_inline(dou, ov, flags);
}
});
// Restore the root queue identity.
_dispatch_reset_basepri(old_dp);
_dispatch_queue_set_current(old_rq);
}

// Invoke entry point for plain queues: drains the queue via the generic
// class-invoke driver with dispatch_queue_invoke2 as the drain callback.
void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_context_t dic,
dispatch_invoke_flags_t flags)
{
_dispatch_queue_class_invoke(dq, dic, flags, 0, dispatch_queue_invoke2);
}

// Generic queue-invoke driver: acquires the drain lock, runs the supplied
// `invoke` callback (which drains the queue's items), then either unlocks or
// re-enqueues the queue onto the target queue `tq` it must continue on.
static inline void _dispatch_queue_class_invoke(dispatch_object_t dou,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
dispatch_invoke_flags_t const_restrict_flags,
_dispatch_queue_class_invoke_handler_t invoke)
{
dispatch_queue_t dq = dou._dq;
dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
bool owning = !(flags & DISPATCH_INVOKE_STEALING);
uint64_t owned = 0;

if (!(flags & (DISPATCH_INVOKE_STEALING | DISPATCH_INVOKE_WLH))) {
dq->do_next = DISPATCH_OBJECT_LISTLESS;
}
flags |= const_restrict_flags;
if (likely(flags & DISPATCH_INVOKE_WLH)) {
owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED | DISPATCH_QUEUE_ENQUEUED;
} else {
owned = _dispatch_queue_drain_try_lock(dq, flags);
}
if (likely(owned)) {
dispatch_priority_t old_dbp;
if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
old_dbp = _dispatch_set_basepri(dq->dq_priority);
} else {
old_dbp = 0;
}

flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);

attempt_running_slow_head:
// Run dispatch_queue_invoke2, i.e. drain the tasks queued on the
// custom queue.
tq = invoke(dq, dic, flags, &owned);
dispatch_assert(tq != DISPATCH_QUEUE_WAKEUP_TARGET);
if (unlikely(tq != DISPATCH_QUEUE_WAKEUP_NONE &&
tq != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT)) {
} else if (!_dispatch_queue_drain_try_unlock(dq, owned,
tq == DISPATCH_QUEUE_WAKEUP_NONE)) {
tq = _dispatch_queue_get_current();
if (dx_hastypeflag(tq, QUEUE_ROOT) || !owning) {
goto attempt_running_slow_head;
}
DISPATCH_COMPILER_CAN_ASSUME(tq != DISPATCH_QUEUE_WAKEUP_NONE);
} else {
owned = 0;
tq = NULL;
}
if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
_dispatch_reset_basepri(old_dbp);
}
}
if (likely(owning)) {
_dispatch_introspection_queue_item_complete(dq);
}

if (tq) {
if (const_restrict_flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS) {
dispatch_assert(dic->dic_deferred == NULL);
} else if (dic->dic_deferred) {
return _dispatch_queue_drain_sync_waiter(dq, dic,
flags, owned);
}

uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED;
if (tq == DISPATCH_QUEUE_WAKEUP_MGR) {
enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
}
os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
new_state = old_state - owned;
new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
new_state |= DISPATCH_QUEUE_DIRTY;
if (_dq_state_is_suspended(new_state)) {
new_state |= DLOCK_OWNER_MASK;
} else if (_dq_state_is_runnable(new_state) &&
!_dq_state_is_enqueued(new_state)) {
// drain was not interrupted for suspension
// we will reenqueue right away, just put ENQUEUED back
new_state |= enqueued;
}
});
old_state -= owned;
if (_dq_state_received_override(old_state)) {
// Ensure that the root queue sees that this thread was overridden.
_dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state));
}
if ((old_state ^ new_state) & enqueued) {
dispatch_assert(_dq_state_is_enqueued(new_state));
return _dispatch_queue_push_queue(tq, dq, new_state);
}
}

_dispatch_release_2_tailcall(dq);
}

// Drain callback: picks serial vs concurrent drain based on queue width,
// or bails out (returning the target queue) if we're not running on it.
static inline dispatch_queue_wakeup_target_t dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_context_t dic,
dispatch_invoke_flags_t flags, uint64_t *owned)
{
dispatch_queue_t otq = dq->do_targetq;
dispatch_queue_t cq = _dispatch_queue_get_current();

if (slowpath(cq != otq)) {
return otq;
}
// Width 1 means a serial queue; anything wider drains concurrently.
if (dq->dq_width == 1) {
return _dispatch_queue_serial_drain(dq, dic, flags, owned);
}
return _dispatch_queue_concurrent_drain(dq, dic, flags, owned);
}

// Concurrent-queue drain: delegates to the common drain loop with
// serial_drain=false.
static dispatch_queue_wakeup_target_t _dispatch_queue_concurrent_drain(dispatch_queue_t dq,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
uint64_t *owned)
{
return _dispatch_queue_drain(dq, dic, flags, owned, false);
}

// Common drain loop shared by serial and concurrent queues. Pops items off
// `dq` one at a time; barrier items force exclusive (full-width) ownership,
// non-barrier items on a concurrent drain are redirected to the root queue.
// `*owned_ptr` tracks how much of the queue's width this thread owns.
static dispatch_queue_wakeup_target_t
_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic,
dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain)
{
dispatch_queue_t orig_tq = dq->do_targetq;
dispatch_thread_frame_s dtf;
struct dispatch_object_s *dc = NULL, *next_dc;
uint64_t dq_state, owned = *owned_ptr;

if (unlikely(!dq->dq_items_tail)) return NULL;

_dispatch_thread_frame_push(&dtf, dq);
if (serial_drain || _dq_state_is_in_barrier(owned)) {
// we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL`
// but width can change while draining barrier work items, so we only
// convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER`
owned = DISPATCH_QUEUE_IN_BARRIER;
} else {
owned &= DISPATCH_QUEUE_WIDTH_MASK;
}

dc = _dispatch_queue_head(dq);
goto first_iteration;

// Loop over the custom queue's items, executing them one after another
// (this drain itself is sequential, not parallel).
for (;;) {
dc = next_dc;
if (unlikely(dic->dic_deferred)) {
goto out_with_deferred_compute_owned;
}
if (unlikely(_dispatch_needs_to_return_to_kernel())) {
_dispatch_return_to_kernel();
}
if (unlikely(!dc)) {
if (!dq->dq_items_tail) {
break;
}
dc = _dispatch_queue_head(dq);
}
if (unlikely(serial_drain != (dq->dq_width == 1))) {
break;
}
if (unlikely(_dispatch_queue_drain_should_narrow(dic))) {
break;
}

first_iteration:
dq_state = os_atomic_load(&dq->dq_state, relaxed);
if (unlikely(_dq_state_is_suspended(dq_state))) {
break;
}
if (unlikely(orig_tq != dq->do_targetq)) {
break;
}

if (serial_drain || _dispatch_object_is_barrier(dc)) {
if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) {
if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) {
goto out_with_no_width;
}
owned = DISPATCH_QUEUE_IN_BARRIER;
}
next_dc = _dispatch_queue_next(dq, dc);
if (_dispatch_object_is_sync_waiter(dc)) {
owned = 0;
dic->dic_deferred = dc;
goto out_with_deferred;
}
} else {
if (owned == DISPATCH_QUEUE_IN_BARRIER) {
os_atomic_xor2o(dq, dq_state, owned, release);
owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
} else if (unlikely(owned == 0)) {
if (_dispatch_object_is_sync_waiter(dc)) {
// sync "readers" don't observe the limit
_dispatch_queue_reserve_sync_width(dq);
} else if (!_dispatch_queue_try_acquire_async(dq)) {
goto out_with_no_width;
}
owned = DISPATCH_QUEUE_WIDTH_INTERVAL;
}

next_dc = _dispatch_queue_next(dq, dc);
if (_dispatch_object_is_sync_waiter(dc)) {
owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
_dispatch_sync_waiter_redirect_or_wake(dq,
DISPATCH_SYNC_WAITER_NO_UNLOCK, dc);
continue;
}

if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) {
owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
_dispatch_continuation_redirect(dq, dc);
continue;
}
}
// Execute the item.
_dispatch_continuation_pop_inline(dc, dic, flags, dq);
}

if (owned == DISPATCH_QUEUE_IN_BARRIER) {
// if we're IN_BARRIER we really own the full width too
owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
}
if (dc) {
owned = _dispatch_queue_adjust_owned(dq, owned, dc);
}
*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
*owned_ptr |= owned;
_dispatch_thread_frame_pop(&dtf);
return dc ? dq->do_targetq : NULL;

out_with_no_width:
*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
_dispatch_thread_frame_pop(&dtf);
return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;

out_with_deferred_compute_owned:
if (serial_drain) {
owned = DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL;
} else {
if (owned == DISPATCH_QUEUE_IN_BARRIER) {
// if we're IN_BARRIER we really own the full width too
owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
}
if (dc) {
owned = _dispatch_queue_adjust_owned(dq, owned, dc);
}
}
out_with_deferred:
*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
*owned_ptr |= owned;
if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) {
DISPATCH_INTERNAL_CRASH(dc,
"Deferred continuation on source, mach channel or mgr");
}
_dispatch_thread_frame_pop(&dtf);
return dq->do_targetq;
}

// (Same function as quoted earlier, repeated for reading flow.)
// Runs one drained item: queues go through their invoke vtable entry, plain
// work items go through _dispatch_continuation_invoke_inline.
static inline void _dispatch_continuation_pop_inline(dispatch_object_t dou,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
dispatch_queue_t dq)
{
dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
_dispatch_get_pthread_root_queue_observer_hooks();
if (observer_hooks) observer_hooks->queue_will_execute(dq);
_dispatch_trace_continuation_pop(dq, dou);
flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
if (_dispatch_object_has_vtable(dou)) {
dx_invoke(dou._do, dic, flags);
} else {
_dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
}
if (observer_hooks) observer_hooks->queue_did_execute(dq);
}

// Invokes a plain continuation: adopts its voucher, runs the client block
// (inside an autorelease pool when configured), then frees the continuation.
static inline void _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
dispatch_invoke_flags_t flags)
{
dispatch_continuation_t dc = dou._dc, dc1;
dispatch_invoke_with_autoreleasepool(flags, {
uintptr_t dc_flags = dc->dc_flags;
_dispatch_continuation_voucher_adopt(dc, ov, dc_flags);
if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
dc1 = _dispatch_continuation_free_cacheonly(dc);
} else {
dc1 = NULL;
}
if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) {
_dispatch_continuation_with_group_invoke(dc);
} else {
// Call _dispatch_client_callout to execute the user block.
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
_dispatch_introspection_queue_item_complete(dou);
}
if (unlikely(dc1)) {
_dispatch_continuation_free_to_cache_limit(dc1);
}
});
_dispatch_perfmon_workitem_inc();
}

0x10 dispatch_get_global_queue

  可以发现,dispatch_get_global_queue其实就是取对应优先级的root队列拿来用。所以上面也提过,为啥在global_queue里面不能用barrier。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
// Public API: returns the root queue for the given priority/QoS.
// NOTE(review): the `dispatch_queue_t` return-type line appears to have been
// dropped from this quote — verify against the libdispatch source.
dispatch_get_global_queue(long priority, unsigned long flags)
{
// Only the OVERCOMMIT flag is a valid input; anything else is rejected.
if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
return DISPATCH_BAD_INPUT;
}
dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);

if (qos == DISPATCH_QOS_UNSPECIFIED) {
return DISPATCH_BAD_INPUT;
}
return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}

// Indexes into the static root-queue array: two entries per QoS level
// (non-overcommit, then overcommit).
static inline dispatch_queue_t _dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) {
DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
}
return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}

0x11 结束语

  常用的GCD几个函数到此就说明完了,每个函数的执行顺序我也用符号断点确认过,基本应该不会有问题。但万一还是有纰漏,请联系本人。

扩展阅读

GCD官方源码

浅谈iOS多线程_使用篇

发表于 2018-01-16 | 分类于 iOS

0x01 老生常谈

  众所周知,多线程的合理使用可以提高CPU利用率,在实际开发中最常见的就是涉及大量计算以及网络请求的时候,都会新开一个线程,避免主线程被阻塞让用户觉得app卡顿。但凡事都不能过犹不及,滥用多线程也会给性能造成影响,毕竟线程之间的切换也是需要花费开销的。

  既然叫多线程,那么线程之间肯定是有优先级区别的,在iOS中,有这么几种权限:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
// Quality-of-service levels for threads/queues (as declared in Foundation).
// Fix: the quoted typedef was missing its terminating semicolon after `}`,
// which would not compile.
typedef NS_ENUM(NSInteger, NSQualityOfService) {
// User-interactive: highest class; for event handling and UI refresh,
// which must happen immediately.
NSQualityOfServiceUserInteractive = 0x21,

// User-initiated: the user asked for a result and expects it right away,
// e.g. loading detail data after tapping a list row.
NSQualityOfServiceUserInitiated = 0x19,

// Utility: results are not needed immediately, e.g. downloads. Under
// resource pressure these tasks run in an energy-saving mode so that
// higher-priority work gets more resources.
NSQualityOfServiceUtility = 0x11,

// Background: work the user is typically unaware of, e.g. data backup.
// Mostly runs in energy-saving mode, yielding resources to higher QoS.
NSQualityOfServiceBackground = 0x09,

// Default: the system picks an effective level based on circumstances;
// otherwise it falls between UserInitiated and Utility.
NSQualityOfServiceDefault = -1
};

0x02 使用多线程

  在iOS中,实现多线程常用的有三种方式:NSThread、GCD和NSOperation;以及一个不怎么常用的pthread。

1. NSThread

1.1 如何使用

  加上iOS10新增的,现在有5种初始化方法,具体初始化代码这边就不放了,实例方法中非block初始化的需要调用start方法进行线程开启:

1
2
3
4
5
+ (void)detachNewThreadWithBlock:(void (^)(void))block; // iOS 10 新增
+ (void)detachNewThreadSelector:(SEL)selector toTarget:(id)target withObject:(nullable id)argument;
- (instancetype)initWithBlock:(void (^)(void))block; // iOS 10 新增
- (instancetype)initWithTarget:(id)target selector:(SEL)selector object:(nullable id)argument;
- (instancetype)init;

  苹果更是为了程序员更方便的调用,直接在NSObject写了扩展,我们只要[self perform….]这样的调用,就能轻松开启一个线程。

  在iOS 8开始,苹果给NSThread提供了qualityOfService属性来设置优先级,比如:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
// Demo: the five NSThread creation styles; thread 4 is given the highest
// qualityOfService and (per the log transcript below) runs first.
- (void)viewDidLoad {
[super viewDidLoad];

// Thread 1: block-based class factory (iOS 10+), starts immediately.
[NSThread detachNewThreadWithBlock:^{
NSLog(@"线程1开始执行。。。");
}];

// Thread 2: selector-based class factory, starts immediately.
[NSThread detachNewThreadSelector:@selector(thread2Selector:) toTarget:self withObject:@"test"];

// Thread 3: block-based initializer (iOS 10+); needs an explicit start.
NSThread *thread3 = [[NSThread alloc] initWithBlock:^{
NSLog(@"线程3开始执行。。。");
}];

// Thread 4: selector-based initializer with the highest QoS (iOS 8+).
NSThread *thread4 = [[NSThread alloc] initWithTarget:self selector:@selector(thread4Selector:) object:@"test"];
thread4.qualityOfService = NSQualityOfServiceUserInteractive;

[thread3 start];
[thread4 start];
}

// Entry point for thread 2 (created via detachNewThreadSelector:).
- (void)thread2Selector:(id)info {
NSLog(@"线程2开始执行。。。");
}

// Entry point for thread 4 (the UserInteractive-QoS thread).
- (void)thread4Selector:(id)info {
NSLog(@"线程4开始执行。。。")。
}

  运行结果很清楚的看到,我设置了最高等级的线程4优先执行:

1
2
3
4
2018-01-02 16:05:09.599964+0800 HotPatch[54134:37810941] 线程4开始执行。。。
2018-01-02 16:05:09.601452+0800 HotPatch[54134:37810939] 线程2开始执行。。。
2018-01-02 16:05:09.602699+0800 HotPatch[54134:37810938] 线程1开始执行。。。
2018-01-02 16:05:09.602698+0800 HotPatch[54134:37810940] 线程3开始执行。。。

  其他属性这里不做着重说明,API都有的,也都很好理解。

1.2 总结

  我们可以看到,NSThread适用于业务场景不是很复杂的时候,属于比较轻量级的,同时在NSThread进行了相关拓展,所以开发中调用也极其方便。同时缺点也很明显,不能设置线程之间的依赖关系,需要手动管理睡眠唤醒等。


2. Grand Central Dispatch (GCD)

  GCD是C实现的,所以效率相对更高,可以更好更方便的让并行代码执行在多核设备上,而且GCD是系统级的,帮助程序可以更合理的利用可用资源。跟NSThread相比,在使用GCD时我们并不需要手动管理任务,我们只需要把任务塞到队列里,系统的线程池会自动帮我们运行和销毁等。在ARC下,GCD的内存管理跟其他对象一样,在非ARC下,需要通过dispatch_retain和dispatch_release进行管理。
  详细说GCD之前,我们先说说下面几个概念:并行、串行、同步和异步。

  • 串行:任务是一个一个有顺序的执行,一个执行完以后才执行下一个。(有序的)
  • 并行:跟串行相反,任务是无序的执行,执行顺序没有顺序关系。(无序的)
  • 同步:需要等上一个任务执行完成后才能执行下一个任务。(依赖于上个任务是否执行完毕)
  • 异步:不需要等到上一个任务执行完成才执行下一个任务。(无需依赖于上个任务是否执行完毕)

  在GCD里,我们通过DISPATCH_QUEUE_SERIAL和DISPATCH_QUEUE_CONCURRENT分别表示串行和并行;通过dispatch_sync和dispatch_async分别表示同步和异步。它们之间互相结合会碰撞出什么样的火花?我们下面通过代码看下结果:

① 串行同步

1
2
3
4
5
6
7
8
9
10
11
12
13
// 首先是串行同步的情况。
dispatch_queue_t serailQueue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_SERIAL);
dispatch_sync(serailQueue, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_sync(serailQueue, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_sync(serailQueue, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

  根据运行结果,串行同步的任务是在主线程上一个一个有顺序的完成的。

1
2
3
2018-01-03 09:22:59.451187+0800 HotPatch[84240:38942350] 在主线程上,第一只熊猫向你走过来...
2018-01-03 09:22:59.451341+0800 HotPatch[84240:38942350] 在主线程上,第二只熊猫向你走过来...
2018-01-03 09:22:59.451450+0800 HotPatch[84240:38942350] 在主线程上,第三只熊猫向你走过来...

  接着,我们观察下串行异步的结果会是如何?

② 串行异步

1
2
3
4
5
6
7
8
9
10
11
12
13
14
dispatch_queue_t serailQueue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_SERIAL);
dispatch_async(serailQueue, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_async(serailQueue, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_async(serailQueue, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

  根据结果,我们可以看到,串行异步的情况下,新开辟了一条线程,该线程上的执行任务的顺序是有序的,而主线程上的任务是与该线程是并行执行的。

1
2
3
4
2018-01-03 09:24:04.200707+0800 HotPatch[84293:38945063] 在主线程上,熊猫妈妈在找熊猫宝宝...
2018-01-03 09:24:04.200707+0800 HotPatch[84293:38945539] 非主线程上,第一只熊猫向你走过来...
2018-01-03 09:24:04.200874+0800 HotPatch[84293:38945539] 非主线程上,第二只熊猫向你走过来...
2018-01-03 09:24:04.200969+0800 HotPatch[84293:38945539] 非主线程上,第三只熊猫向你走过来...

③ 并行同步

1
2
3
4
5
6
7
8
9
10
11
12
13
14
dispatch_queue_t concurrentQueue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_CONCURRENT);
dispatch_sync(concurrentQueue, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_sync(concurrentQueue, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_sync(concurrentQueue, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

  根据结果来看,虽然是在并行队列上,但是并没有新开线程,都是在主线程上按顺序来执行任务。

1
2
3
4
2018-01-03 09:30:56.633674+0800 HotPatch[84466:38953980] 在主线程上,第一只熊猫向你走过来...
2018-01-03 09:30:56.633827+0800 HotPatch[84466:38953980] 在主线程上,第二只熊猫向你走过来...
2018-01-03 09:30:56.633913+0800 HotPatch[84466:38953980] 在主线程上,第三只熊猫向你走过来...
2018-01-03 09:30:56.633992+0800 HotPatch[84466:38953980] 在主线程上,熊猫妈妈在找熊猫宝宝...

④ 并行异步

1
2
3
4
5
6
7
8
9
10
11
12
13
14
dispatch_queue_t concurrentQueue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(concurrentQueue, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_async(concurrentQueue, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_async(concurrentQueue, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

  根据结果来看,并行异步下,会开启多个线程执行任务,他们之间的顺序包括主线程是无序的。

1
2
3
4
2018-01-03 09:36:48.246392+0800 HotPatch[84632:38962053] 非主线程上,第二只熊猫向你走过来...
2018-01-03 09:36:48.246392+0800 HotPatch[84632:38962045] 非主线程上,第一只熊猫向你走过来...
2018-01-03 09:36:48.246392+0800 HotPatch[84632:38961995] 在主线程上,熊猫妈妈在找熊猫宝宝...
2018-01-03 09:36:48.246392+0800 HotPatch[84632:38962046] 非主线程上,第三只熊猫向你走过来...

  前面我们已经说过,串行异步,会新开一个线程,该线程里的任务按顺序执行,那么,我新建了多个串行队列,那么他们之间的关系是怎么样的?是不是也是按照顺序排排坐吃果果呢?

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
dispatch_queue_t serialQueue1 = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_SERIAL);
dispatch_async(serialQueue1, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_queue_t serialQueue2 = dispatch_queue_create("com.zhaomu.test2", DISPATCH_QUEUE_SERIAL);
dispatch_async(serialQueue2, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_queue_t serialQueue3 = dispatch_queue_create("com.zhaomu.test3", DISPATCH_QUEUE_SERIAL);
dispatch_async(serialQueue3, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});
dispatch_async(serialQueue3, ^{
NSLog(@"%@主线程上,第四只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

  根据下面的运行结果可以看到开了多个串行队列的情况下,多个串行队列之间可能是并行执行的。

1
2
3
4
5
2018-01-03 10:09:58.754283+0800 HotPatch[85801:39008283] 在主线程上,熊猫妈妈在找熊猫宝宝...
2018-01-03 10:09:58.754287+0800 HotPatch[85801:39008328] 非主线程上,第二只熊猫向你走过来...
2018-01-03 10:09:58.754287+0800 HotPatch[85801:39008330] 非主线程上,第一只熊猫向你走过来...
2018-01-03 10:09:58.754287+0800 HotPatch[85801:39008329] 非主线程上,第三只熊猫向你走过来...
2018-01-03 10:09:58.754514+0800 HotPatch[85801:39008329] 非主线程上,第四只熊猫向你走过来...

  至此,他们四者之间的关系大致搞清楚了,那么这里遗留一个问题,下面的代码会有什么问题?为什么会导致这样的问题?

1
2
3
4
5
6
7
8
9
10
dispatch_queue_t serialQueue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_SERIAL);
// DEADLOCK: the outer dispatch_sync is already executing on serialQueue.
// The inner dispatch_sync targets the SAME serial queue, so it must wait
// for the current (outer) block to finish — but the outer block is itself
// waiting for the inner block to run. Neither can proceed, and the final
// NSLog is never reached.
dispatch_sync(serialQueue, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");

dispatch_sync(serialQueue, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

2.1 创建队列

  之前在了解串行、并行、同步和异步的时候,已经看到可以通过dispatch_queue_create创建一个队列。那么还有其他方法吗?相信大家已经很快想到了:

1
2
3
4
5
6
7
8
dispatch_async(dispatch_get_main_queue(), ^{

});

// 或者
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{

});

  对于这两个方法,也没有什么特别要讲的,相信大家在实际开发中也已经用了很多很多了。只是提一点,dispatch_get_global_queue得到的队列, dispatch_suspend,dispatch_resume和dispatch_set_context函数对其是无效的,具体的后面再详细说明。
  回到dispatch_queue_create方法,之前只是说了可以设置参数来控制返回的是串行还是并行队列,但是如果我想对这个队列进行优先级设置,我们该怎么做呢?

1
2
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_USER_INTERACTIVE, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);

  dispatch_queue_attr_make_with_qos_class函数可以设置队列优先级,它的第二个参数设置优先级。这里的优先级有这几个选择,也有其对应的宏定义。具体解释可以对应本文开头的优先级解释:

  • QOS_CLASS_USER_INTERACTIVE (DISPATCH_QUEUE_PRIORITY_HIGH)
  • QOS_CLASS_USER_INITIATED (DISPATCH_QUEUE_PRIORITY_HIGH)
  • QOS_CLASS_UTILITY (DISPATCH_QUEUE_PRIORITY_LOW)
  • QOS_CLASS_DEFAULT (DISPATCH_QUEUE_PRIORITY_DEFAULT)
  • QOS_CLASS_BACKGROUND (DISPATCH_QUEUE_PRIORITY_BACKGROUND)

  dispatch_queue_attr_make_with_qos_class函数的第三个参数,需要填写一个负数的偏移值,小于0且大于等于-15(QOS_MIN_RELATIVE_PRIORITY即表示为-15),必须这么填,不然函数会返回一个null。这个参数主要作用是在你给定的优先级系统不能满足的情况下,如果需要调度的话,给定一个调度偏移值。

  当然,我们还可以通过dispatch_set_target_queue给目标队列设置优先级,比如设置权限前,我们的代码是这样的:

1
2
3
4
5
6
7
8
9
10
11
12
13
dispatch_queue_t queue1 = dispatch_get_global_queue(NSQualityOfServiceUserInitiated, 0);
dispatch_async(queue1, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_queue_t queue2 = dispatch_get_global_queue(NSQualityOfServiceUserInteractive, 0);
dispatch_async(queue2, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

// 运行结果:
2018-01-03 10:51:55.094364+0800 HotPatch[87483:39063303] 非主线程上,第二只熊猫向你走过来...
2018-01-03 10:51:55.094374+0800 HotPatch[87483:39063315] 非主线程上,第一只熊猫向你走过来...

  将第二个队列权限设置为第一个队列一样:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
dispatch_queue_t queue1 = dispatch_get_global_queue(NSQualityOfServiceUserInitiated, 0);
dispatch_async(queue1, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_queue_t queue2 = dispatch_get_global_queue(NSQualityOfServiceUserInteractive, 0);
dispatch_async(queue2, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_set_target_queue(queue2, queue1);

// 运行结果:
2018-01-03 10:54:54.485234+0800 HotPatch[87580:39067563] 非主线程上,第一只熊猫向你走过来...
2018-01-03 10:54:54.485234+0800 HotPatch[87580:39067564] 非主线程上,第二只熊猫向你走过来...

  dispatch_set_target_queue除了可以设置优先级,同样可以设置队列类型,比如如下面的代码,其结果因为可以多开线程,所以其运行顺序是无序的:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_SERIAL);
dispatch_async(queue1, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test1", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(queue2, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});
dispatch_async(queue2, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

// 运行结果
2018-01-03 11:03:07.245436+0800 HotPatch[87849:39079716] 非主线程上,第二只熊猫向你走过来...
2018-01-03 11:03:07.245431+0800 HotPatch[87849:39079659] 在主线程上,熊猫妈妈在找熊猫宝宝...
2018-01-03 11:03:07.245436+0800 HotPatch[87849:39079717] 非主线程上,第一只熊猫向你走过来...
2018-01-03 11:03:07.245436+0800 HotPatch[87849:39079714] 非主线程上,第三只熊猫向你走过来...

  现在我的需求是,队列1执行完后,我再去执行队列2,可以修改如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
 dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test1", DISPATCH_QUEUE_CONCURRENT);

dispatch_set_target_queue(queue2, queue1);

dispatch_async(queue1, ^{
NSLog(@"%@主线程上,第一只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

dispatch_async(queue2, ^{
NSLog(@"%@主线程上,第二只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});
dispatch_async(queue2, ^{
NSLog(@"%@主线程上,第三只熊猫向你走过来...", [NSThread isMainThread] ? @"在" : @"非");
});

NSLog(@"%@主线程上,熊猫妈妈在找熊猫宝宝...", [NSThread isMainThread] ? @"在" : @"非");

// 运行结果
2018-01-03 11:20:16.703801+0800 HotPatch[88657:39110595] 在主线程上,熊猫妈妈在找熊猫宝宝...
2018-01-03 11:20:16.703801+0800 HotPatch[88657:39110643] 非主线程上,第一只熊猫向你走过来...
2018-01-03 11:20:16.703987+0800 HotPatch[88657:39110643] 非主线程上,第二只熊猫向你走过来...
2018-01-03 11:20:16.704185+0800 HotPatch[88657:39110643] 非主线程上,第三只熊猫向你走过来...

  需要注意的是,这里dispatch_set_target_queue的设置时机是在设置block任务之前,同时不能循环设置,比如上面把queue1设置给了queue2,如果再把queue2设置给queue1,这样会导致问题。

2.2 设置队列标识

  通过dispatch_queue_set_specific可以设置队列标识,然后通过dispatch_get_specific来帮助我们判断是否在某个队列中。其中参数key是个标识符,用来绑定所对应队列的上下文环境,举个例子:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
static const void *key = "key";

- (void)viewDidLoad {
[super viewDidLoad];

dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", DISPATCH_QUEUE_SERIAL);
dispatch_queue_set_specific(queue1, key, &key, NULL);
dispatch_async(queue1, ^{
[self performSelector:@selector(testSelector) withObject:nil];
});

dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", DISPATCH_QUEUE_SERIAL);
dispatch_async(queue2, ^{
[self performSelector:@selector(testSelector2) withObject:nil];
});
}

- (void)testSelector {
if(dispatch_get_specific(key)) {
NSLog(@"这个key所属的队列是queue1队列");
}else{
NSLog(@"这个key所属的队列不是queue1队列");
}
}

- (void)testSelector2 {
if(dispatch_get_specific(key)) {
NSLog(@"这个key所属的队列是queue2队列");
}else{
NSLog(@"这个key所属的队列不是queue2队列");
}
}

// 打印结果
2018-01-03 14:02:16.396954+0800 HotPatch[92482:39270353] 这个key所属的队列不是queue2队列
2018-01-03 14:02:16.396960+0800 HotPatch[92482:39270355] 这个key所属的队列是queue1队列

  看了api可能会有疑问,dispatch_queue_get_specific和dispatch_get_specific,有什么区别?

1
2
void * dispatch_queue_get_specific(dispatch_queue_t queue, const void *key);
void * dispatch_get_specific(const void *key);

  前者比后者多传了一个队列参数,所以显而易见,前者判断的是所传队列的上下文数据是不是绑定为标识符为key的;而后者则是判断当前所在上下文环境是不是标识符为key的。
  细心的朋友可能会问,既然是标识符,那api里还有个dispatch_queue_get_label是干嘛用的?我们之前调用dispatch_queue_create函数的时候,第一个参数就是label。

1
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", DISPATCH_QUEUE_SERIAL);

  这个label,可以在debug的时候,用于显示当前线程所在队列的名称,如下图:线程label示例
  那么label还有什么用处?相信肯定有的人唰唰唰写出如下代码,当然这也不是说不可以:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
- (void)viewDidLoad {
[super viewDidLoad];

dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", DISPATCH_QUEUE_SERIAL);
dispatch_async(queue1, ^{
[self performSelector:@selector(testSelector) withObject:nil];
});
}

- (void)testSelector {
NSString *labelName = [NSString stringWithUTF8String:dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL)];
if ([labelName isEqualToString:@"com.zhaomu.test1"]) {
NSLog(@"现在所处queue1队列");
}
}

  那么问题又来了,如何判断当前是否是主线程,相信很快有人会回答[NSThread isMainThread]!!!那么,我们知道一个主线程可以有多个队列,那么如何判断是否在主队列呢?
  这里介绍两种方法:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
// 第一种方法
- (void)viewDidLoad {
[super viewDidLoad];

dispatch_queue_t mainQueue = dispatch_get_main_queue();
dispatch_queue_set_specific(mainQueue, key, &key, NULL);
dispatch_async(mainQueue, ^{
[self performSelector:@selector(testSelector) withObject:nil];
});
}

- (void)testSelector {
if(dispatch_get_specific(key)) {
NSLog(@"注意,这里是主线程主队列!!!");
}
}

// 第二种方法
NSString *currentLabel = [NSString stringWithUTF8String:dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL)];
NSString *mainLabel = [NSString stringWithUTF8String:dispatch_queue_get_label(dispatch_get_main_queue())];
if([currentLabel isEqualToString:mainLabel]) {
NSLog(@"注意,这里是主线程主队列!!!");
}

2.3 dispatch_after

  这个函数,大家肯定不陌生,用的也肯定滚瓜烂熟。这个函数的作用就是倒计时的时间到了后,将block任务添加到指定的队列里面去,然后执行。

1
2
3
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(1 * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
// 1秒后去做的事情
});

  复习时间到了,那么我给出如下代码,dispatch_after里面的代码会在1秒后执行吗?

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_CONCURRENT);

dispatch_sync(queue, ^{
[NSThread sleepForTimeInterval:2];
NSLog(@"本次操作需要2秒,操作完成");
});

dispatch_sync(queue, ^{
[NSThread sleepForTimeInterval:5];
NSLog(@"本次操作需要5秒,操作完成");
});

dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(1 * NSEC_PER_SEC)), queue, ^{
// 1秒后去做的事情
NSLog(@"1秒后能来到这吗");
});

  答案是并不会,我们前面说过,并行同步是排排坐吃果果的,是按顺序执行任务的,dispatch_after一秒后的确把任务提交到队列了,但是因为前面的任务还没有完成,所以它也就只能乖乖等着了。
  谈到延时执行,肯定大家还会想到另一个方法:

1
- (void)performSelector:(SEL)aSelector withObject:(nullable id)anArgument afterDelay:(NSTimeInterval)delay;

  那么这个方法跟dispatch_after又有什么区别呢?观察下面的代码,我的疑问就是熊猫宝宝会向您奔来吗,也就是会不会执行testSelector方法?

1
2
3
4
5
6
7
8
9
10
11
- (void)viewDidLoad {
[super viewDidLoad];

dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
[self performSelector:@selector(testSelector) withObject:nil afterDelay:2];
});
}

- (void)testSelector {
NSLog(@"一只熊猫宝宝向您奔来....");
}

  答案是否定的,方法并没有执行。这是为什么呢?performSelector:afterDelay方法内部其实有个NSTimer定时器,需要把定时器加到runloop进行执行,所以这个方法需要依赖runloop,那么我们必须知道的是,开启的子线程默认是没有runloop的,所以导致testSelector方法没有被执行。而dispatch_after并没有这个问题。
  拓展一下,GCD中除了dispatch_after,还有一种类似于NSTimer的计时器,也就是通过dispatch_source来设置定时器。其实如果看源码dispatch_after内部也是通过dispatch_source进行实现的。这里只是先做介绍,后面到dispatch_source章节再详细说明。

2.4 dispatch_once

  这个函数常用于构造一份唯一的实例,用于app内存域中达到资源共享。所以有时候我们看到一个项目里会有很多单例。那么,我们是否思考过,真的有必要在app里使用很多的单例,那么如果使用不当会造成什么后果?

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
- (void)viewDidLoad {
[super viewDidLoad];

[self once];
}

- (void)once {
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
[self once];
});
NSLog(@"遇到一只熊猫宝宝...");
}

// 或者
- (void)viewDidLoad {
[super viewDidLoad];

[self once];
}

- (void)once {
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
[self otherOnce];
});
NSLog(@"遇到第一只熊猫宝宝...");
}

- (void)otherOnce {
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
[self once];
});
NSLog(@"遇到第二只熊猫宝宝...");
}

  我们看下栈的追踪,可以发现死锁了,具体原因在下篇GCD源码篇具体说明。dipatch_once示例
  除了单例这种方式,是否还有别的方式能达到资源共享?在Xcode 8开始,支持了类属性:

1
@property(nonatomic, copy, class) NSString *name;

  所以,我们可以利用这个新的特性,来完成资源共享。

2.5 dispatch_block

  dipatch_block,可以理解为一个block对象,拿到这个对象可以让我们独立操作一个任务,我们可以更灵活的操作一个任务,比如等待、执行以及监听任务的完成,以便在这个任务完成后去做些什么。
  一般来说,我们可以这样定义一个GCD的block对象。

1
2
3
4
5
6
7
dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

dispatch_block_t block = ^{
NSLog(@"一只熊猫宝宝向你奔来...");
};

dispatch_async(queue, block);

2.5.1 dispatch_block_create

  当然,一般开发场景下并不需要这么啰嗦的实现方式,没必要把block单独拿出来。而且需要注意的是,上面的声明方式,这个block是在栈上的。我们还有一种初始化block对象的方式,但是说这个之前,我们先了解下面枚举值的意思:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
typedef enum : unsigned long {
// 当提交到DISPATCH_QUEUE_CONCURRENT队列,类似于dispatch_barrier_async(后面会介绍)作用。如果标记为这个的block对象被直接调用,将没有barrier效果。
DISPATCH_BLOCK_BARRIER,
// block对象将解除与当前执行上下文属性的关联,如果直接调用,在分配给block属性之前,在调用线程上block对象将在block任务执行期间移出这些属性。如果提交到队列,block对象将使用队列属性或者分配给Block对象的属性。
DISPATCH_BLOCK_DETACHED,
// Block对象被创建的同时会为block对象分配执行上下文属性。如果直接调用,block对象将在block任务执行期间将这些属性应用于调用线程。如果block任务被提交到队列,则这个标识将在提交队列的同时会替换其所关联的block对象默认的上下文属性。
DISPATCH_BLOCK_ASSIGN_CURRENT,
// 表示不能设置优先级属性给block,如果block被直接调用,将会使用当前线程的优先级。如果被提交到队列,在提交到队列的同时将会取消原来的优先级属性。在dispatch_block_create_with_qos_class函数中,这个属性无效。
DISPATCH_BLOCK_NO_QOS_CLASS,
// block和队列同时有优先级属性的情况下,优先使用队列的优先级。当队列没有优先级属性的情况下,block的优先级才会被采用,当block被执行调用,这个属性无效;如果被提交到并行异步队列,这个属性是默认的。
DISPATCH_BLOCK_INHERIT_QOS_CLASS,
// block的优先级属性要高于队列的优先级属性。如果block被直接调用或被提交到并行同步队列,这个属性是默认的
DISPATCH_BLOCK_ENFORCE_QOS_CLASS
} dispatch_block_flags_t;

  dispatch_block_create也可以创建一个block对象,并且重点是分配在堆上的,在创建的时候,可以设置一个标识位,就是上面说的那几个。
  创建出来的block提交到队列的时候同时会为block赋值一个默认的优先级属性,但也有例外,这三个标识位就不会默认设置优先级,分别是DISPATCH_BLOCK_ASSIGN_CURRENT、DISPATCH_BLOCK_NO_QOS_CLASS和DISPATCH_BLOCK_DETACHED。
  当Block队列放入并行同步队列,DISPATCH_BLOCK_ENFORCE_QOS_CLASS 是默认的。当被放入并行异步队列,DISPATCH_BLOCK_INHERIT_QOS_CLASS是默认的。如果一个被赋值了优先级属性的block对象被放入到一个串行队列,那么系统将会尽可能的让已经在前面的block对象与这个block对象拥有一个优先级或者更高优先级,以让之前的block任务优先执行。
  前面说到可以给block设置优先级,所以先介绍下dispatch_block_create_with_qos_class函数,跟之前dispatch_block_create相比,多了一个dispatch_qos_class_t属性,用来设置优先级,以及relative_priority属性,表示偏移值,这个参数主要作用是在你给定的优先级系统不能满足的情况下,如果需要调度的话,给定一个调度偏移值。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", attr_t);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", attr_t);
dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create_with_qos_class(DISPATCH_BLOCK_ENFORCE_QOS_CLASS, QOS_CLASS_USER_INTERACTIVE, -1, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
});
dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
});
dispatch_async(queue1, block1);
dispatch_async(queue2, block2);
dispatch_async(queue2, block3);

  从运行结果来看,我们使用了DISPATCH_BLOCK_ENFORCE_QOS_CLASS标记位,使得block的优先级高于队列的优先级,所以block2始终优先执行。

1
2
3
2018-01-06 21:00:46.583204+0800 HotPatch[4917:43250223] 第二只熊猫宝宝向你奔来...
2018-01-06 21:00:46.583230+0800 HotPatch[4917:43250226] 第一只熊猫宝宝向你奔来...
2018-01-06 21:00:46.583239+0800 HotPatch[4917:43250225] 第三只熊猫宝宝向你奔来...

2.5.2 dispatch_block_perform

  直接执行block,使用这个函数,对block设置优先级是无效的。当然也直接比如block()这样执行block,效果是一样的:

1
2
3
4
5
6
7
8
9
10
11
12
13
    dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create_with_qos_class(DISPATCH_BLOCK_DETACHED, QOS_CLASS_USER_INTERACTIVE, -1, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
});

dispatch_block_perform(DISPATCH_BLOCK_DETACHED, block1);
block2();

// 运行结果
2018-01-06 21:15:24.000871+0800 HotPatch[5329:43266428] 第一只熊猫宝宝向你奔来...
2018-01-06 21:15:24.001041+0800 HotPatch[5329:43266428] 第二只熊猫宝宝向你奔来...

2.5.3 dispatch_block_wait

  这个函数的作用是等待block任务完成,再继续往下执行代码。需要注意的是,这个函数的使用是要在立即执行命令之后,或者加入到队列之后。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", attr_t);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", attr_t);
dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝已经抱住你大腿...");
});

dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
});

dispatch_async(queue1, block1);
dispatch_async(queue2, block2);

dispatch_block_wait(block2, DISPATCH_TIME_FOREVER);

dispatch_async(queue2, block3);
NSLog(@"熊猫妈妈在找熊猫宝宝...");

  看到运行结果,直到block2的任务完成,下面的命令才会被继续执行。

1
2
3
4
5
2018-01-07 09:30:37.900512+0800 HotPatch[7042:43373020] 第二只熊猫宝宝向你奔来...
2018-01-07 09:30:37.900488+0800 HotPatch[7042:43373010] 第一只熊猫宝宝向你奔来...
2018-01-07 09:30:42.971854+0800 HotPatch[7042:43373020] 第二只熊猫宝宝已经抱住你大腿...
2018-01-07 09:30:42.972097+0800 HotPatch[7042:43372948] 熊猫妈妈在找熊猫宝宝...
2018-01-07 09:30:42.972101+0800 HotPatch[7042:43373010] 第三只熊猫宝宝向你奔来...

2.5.4 dispatch_block_notify

  这个函数起到通知的作用,也就是当这个函数监听的任务完成后,会执行dispatch_block_notify函数自己的任务。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", attr_t);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", attr_t);
dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝已经抱住你大腿...");
});

dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
});

dispatch_async(queue1, block1);
dispatch_async(queue2, block2);

dispatch_block_notify(block2, queue2, ^{
NSLog(@"捕获一个熊猫宝宝...");
});

dispatch_async(queue2, block3);
NSLog(@"熊猫妈妈在找熊猫宝宝...");

  我们可以看到,dispatch_block_notify的block任务会在block2任务完成后被调用。

1
2
3
4
5
6
2018-01-07 09:35:17.368299+0800 HotPatch[7252:43379370] 熊猫妈妈在找熊猫宝宝...
2018-01-07 09:35:17.368419+0800 HotPatch[7252:43379414] 第一只熊猫宝宝向你奔来...
2018-01-07 09:35:17.368538+0800 HotPatch[7252:43379416] 第二只熊猫宝宝向你奔来...
2018-01-07 09:35:17.368558+0800 HotPatch[7252:43379423] 第三只熊猫宝宝向你奔来...
2018-01-07 09:35:22.369035+0800 HotPatch[7252:43379416] 第二只熊猫宝宝已经抱住你大腿...
2018-01-07 09:35:22.369436+0800 HotPatch[7252:43379416] 捕获一个熊猫宝宝...

  那么,dispatch_block_notify函数的block任务是不是一定是在监听的任务完成后马上调用呢,事实是并不一定的,如果dispatch_block_notify函数的block任务所在队列前面还有任务没有被执行完毕,那么这个block任务需要等待前面任务的完成,也就没有及时性了。我们举个例子,在串行队列中,就会很明显感到这个:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", attr_t);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", attr_t);
dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第二只熊猫宝宝已经抱住你大腿...");
});

dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:8];
NSLog(@"第三只熊猫宝宝已经抱住你大腿...");
});

dispatch_block_t block4 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第四只熊猫宝宝向你奔来...");
});

dispatch_sync(queue1, block1);
dispatch_sync(queue2, block2);
dispatch_sync(queue2, block3);

dispatch_block_notify(block2, queue2, ^{
NSLog(@"捕获一个熊猫宝宝...");
});

dispatch_sync(queue2, block4);
NSLog(@"熊猫妈妈在找熊猫宝宝...");

// 运行结果:
2018-01-07 09:45:29.295842+0800 HotPatch[7413:43393215] 第一只熊猫宝宝向你奔来...
2018-01-07 09:45:29.295967+0800 HotPatch[7413:43393215] 第二只熊猫宝宝向你奔来...
2018-01-07 09:45:32.297043+0800 HotPatch[7413:43393215] 第二只熊猫宝宝已经抱住你大腿...
2018-01-07 09:45:32.297180+0800 HotPatch[7413:43393215] 第三只熊猫宝宝向你奔来...
2018-01-07 09:45:40.298124+0800 HotPatch[7413:43393215] 第三只熊猫宝宝已经抱住你大腿...
2018-01-07 09:45:40.298385+0800 HotPatch[7413:43393215] 第四只熊猫宝宝向你奔来...
2018-01-07 09:45:40.298400+0800 HotPatch[7413:43393272] 捕获一个熊猫宝宝...
2018-01-07 09:45:40.298521+0800 HotPatch[7413:43393215] 熊猫妈妈在找熊猫宝宝...

  如果一个block任务被多个监听,那么这几个dispatch_block_notify函数的回调并不是有序的,也就是说并不是写在前面,这个回调最先执行,而是看所在队列,如果谁先被轮到执行就是谁优先。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", attr_t);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", attr_t);
dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第二只熊猫宝宝已经抱住你大腿...");
});

dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
});

dispatch_sync(queue1, block1);
dispatch_sync(queue2, block2);

dispatch_block_notify(block2, queue2, ^{
NSLog(@"捕获一个熊猫宝宝1...");
});

dispatch_block_notify(block2, queue1, ^{
NSLog(@"捕获一个熊猫宝宝2...");
});

dispatch_sync(queue2, block3);
NSLog(@"熊猫妈妈在找熊猫宝宝...");
// 运行结果:
2018-01-07 10:01:01.608315+0800 HotPatch[7755:43410127] 第一只熊猫宝宝向你奔来...
2018-01-07 10:01:01.608485+0800 HotPatch[7755:43410127] 第二只熊猫宝宝向你奔来...
2018-01-07 10:01:04.608668+0800 HotPatch[7755:43410127] 第二只熊猫宝宝已经抱住你大腿...
2018-01-07 10:01:04.608838+0800 HotPatch[7755:43410127] 第三只熊猫宝宝向你奔来...
2018-01-07 10:01:04.608862+0800 HotPatch[7755:43410172] 捕获一个熊猫宝宝2...
2018-01-07 10:01:04.608866+0800 HotPatch[7755:43410182] 捕获一个熊猫宝宝1...
2018-01-07 10:01:04.608929+0800 HotPatch[7755:43410127] 熊猫妈妈在找熊猫宝宝...

2.5.5 dispatch_block_cancel

  这个函数的作用就是取消block任务,但是前提就是这个block还没有被执行。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue1 = dispatch_queue_create("com.zhaomu.test1", attr_t);
dispatch_queue_t queue2 = dispatch_queue_create("com.zhaomu.test2", attr_t);
dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
});
dispatch_block_t block2 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第二只熊猫宝宝已经抱住你大腿...");
});

dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
});

dispatch_async(queue1, block1);
dispatch_async(queue2, block2);
dispatch_async(queue2, block3);

dispatch_block_cancel(block3);

  运行结果可以看到,block3是没有被执行的,已经被取消了,

1
2
3
2018-01-07 18:46:13.991672+0800 HotPatch[8198:43441461] 第一只熊猫宝宝向你奔来...
2018-01-07 18:46:13.991672+0800 HotPatch[8198:43441459] 第二只熊猫宝宝向你奔来...
2018-01-07 18:46:17.063260+0800 HotPatch[8198:43441459] 第二只熊猫宝宝已经抱住你大腿...

  需要注意的是内存问题:如果这个block里做的是释放资源之类的操作,一旦block被取消而没有执行,就可能造成内存泄漏等问题。

2.5.6 dispatch_suspend、dispatch_resume

  只适用于自定义的队列,不适用于dispatch_get_global_queue等。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
    dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_CONCURRENT);

dispatch_block_t block1 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第一只熊猫宝宝抱住你的大腿...");
});

dispatch_block_t block2 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝抱住你的大腿...");
});

dispatch_block_t block3 = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第三只熊猫宝宝抱住你的大腿...");
});

dispatch_suspend(queue);

dispatch_async(queue, block1);
dispatch_async(queue, block2);
dispatch_async(queue, block3);

NSLog(@"熊猫妈妈正在找熊猫宝宝....");
[NSThread sleepForTimeInterval:3];
dispatch_resume(queue);

// 运行结果
2018-01-16 11:26:35.628261+0800 HotPatch[95183:56902962] 熊猫妈妈正在找熊猫宝宝....
2018-01-16 11:26:38.629397+0800 HotPatch[95183:56903020] 第一只熊猫宝宝向你奔来...
2018-01-16 11:26:38.629410+0800 HotPatch[95183:56903012] 第二只熊猫宝宝向你奔来...
2018-01-16 11:26:38.629412+0800 HotPatch[95183:56903019] 第三只熊猫宝宝向你奔来...
2018-01-16 11:26:43.629721+0800 HotPatch[95183:56903020] 第一只熊猫宝宝抱住你的大腿...
2018-01-16 11:26:43.629721+0800 HotPatch[95183:56903019] 第三只熊猫宝宝抱住你的大腿...
2018-01-16 11:26:43.629721+0800 HotPatch[95183:56903012] 第二只熊猫宝宝抱住你的大腿...

2.5.7 dispatch_apply

  有时候我们会有这样的需求:同时开启多个线程执行某个任务,比如解析多张图片等。这时候我们就没必要写出下面这样的代码

1
2
3
4
5
for (NSInteger i = 0; i < 10; i++) {
dispatch_async(queue, ^{
// TODO SOMETHING
});
}

  更高效的方式如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);


dispatch_apply(10, queue, ^(size_t idx) {
NSLog(@"%@ --> 第%@只熊猫宝宝向你奔来...", [NSThread currentThread],@(idx));
});

// 运行结果:
2018-01-09 17:46:38.843560+0800 HotPatch[97146:83887263] <NSThread: 0x60000007d100>{number = 1, name = main} --> 第5只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843578+0800 HotPatch[97146:83887379] <NSThread: 0x60c000270b00>{number = 4, name = (null)} --> 第2只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843560+0800 HotPatch[97146:83887378] <NSThread: 0x60000027be80>{number = 7, name = (null)} --> 第4只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843560+0800 HotPatch[97146:83887376] <NSThread: 0x60800026e080>{number = 3, name = (null)} --> 第0只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843593+0800 HotPatch[97146:83887398] <NSThread: 0x60400007f580>{number = 9, name = (null)} --> 第7只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843560+0800 HotPatch[97146:83887377] <NSThread: 0x60c000270c80>{number = 6, name = (null)} --> 第3只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843560+0800 HotPatch[97146:83887397] <NSThread: 0x60800026e0c0>{number = 8, name = (null)} --> 第6只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843603+0800 HotPatch[97146:83887386] <NSThread: 0x60400007f4c0>{number = 5, name = (null)} --> 第1只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843771+0800 HotPatch[97146:83887263] <NSThread: 0x60000007d100>{number = 1, name = main} --> 第9只熊猫宝宝向你奔来...
2018-01-09 17:46:38.843786+0800 HotPatch[97146:83887379] <NSThread: 0x60c000270b00>{number = 4, name = (null)} --> 第8只熊猫宝宝向你奔来...

2.6 dispatch_group

  在实际开发中,我们经常会遇到一种情况,就是需要调用多个接口,然后等接口都请求完毕后,我们再去处理一些事情,比如UI更新等。那么这种情况,可以有多种实现方法,其中之一就是这里要介绍的 – dispatch_group。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);
dispatch_group_t group = dispatch_group_create();

dispatch_group_async(group, queue, ^{
NSLog(@"第一只熊猫向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第一只熊猫抱住你的大腿...");
});

dispatch_group_async(group, queue, ^{
NSLog(@"第二只熊猫向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第二只熊猫抱住你的大腿...");
});

dispatch_group_notify(group, queue, ^{
NSLog(@"捕获所有熊猫宝宝...");
});
// 运行结果:
2018-01-08 08:55:22.261748+0800 HotPatch[12918:43636778] 第一只熊猫向你奔来...
2018-01-08 08:55:22.261746+0800 HotPatch[12918:43636779] 第二只熊猫向你奔来...
2018-01-08 08:55:25.334679+0800 HotPatch[12918:43636779] 第二只熊猫抱住你的大腿...
2018-01-08 08:55:25.334679+0800 HotPatch[12918:43636778] 第一只熊猫抱住你的大腿...
2018-01-08 08:55:25.334900+0800 HotPatch[12918:43636779] 捕获所有熊猫宝宝...

  dispatch_group_notify并不是阻塞线程的,如果想在任务没有完成时不继续往下执行,我们可以使用dispatch_group_wait函数,起到阻塞线程的作用。当然,除非情非得已,并不推荐这种阻塞的方式。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);
dispatch_group_t group = dispatch_group_create();

dispatch_group_async(group, queue, ^{
NSLog(@"第一只熊猫向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第一只熊猫抱住你的大腿...");
});

dispatch_group_async(group, queue, ^{
NSLog(@"第二只熊猫向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第二只熊猫抱住你的大腿...");
});

dispatch_group_wait(group, DISPATCH_TIME_FOREVER);

NSLog(@"捕获所有熊猫宝宝...");
// 运行结果:
2018-01-08 09:18:28.990738+0800 HotPatch[13852:43662410] 第一只熊猫向你奔来...
2018-01-08 09:18:28.990723+0800 HotPatch[13852:43662408] 第二只熊猫向你奔来...
2018-01-08 09:18:32.062137+0800 HotPatch[13852:43662410] 第一只熊猫抱住你的大腿...
2018-01-08 09:18:32.062137+0800 HotPatch[13852:43662408] 第二只熊猫抱住你的大腿...
2018-01-08 09:18:32.062398+0800 HotPatch[13852:43662317] 捕获所有熊猫宝宝...

  想必,可能很多人有这样的疑问,项目中,大多数后台接口的调用的方式是这样的:

1
2
3
4
5
 [[RequestManager shareInstance] getxxxxSuccess:^{

} failure:^{

}];

  那么,如何把这样的任务,也加入到dispatch_group_t中呢?这时候需要用到这么一对函数:dispatch_group_enter和dispatch_group_leave。

1
2
3
4
5
6
7
8
9
10
11
12
13
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);
dispatch_group_t group = dispatch_group_create();

dispatch_group_enter(group);
[[RequestManager shareInstance] getxxxxSuccess:^{
dispatch_group_leave(group);
} failure:^{
dispatch_group_leave(group);
}];

dispatch_group_notify(group, queue, ^{
NSLog(@"接口请求完毕...");
});

  当然,除了dispatch_group可以达到我们的需求,那有没有其他方法也可以实现这个功能呢?

2.7 dispatch_semaphore

  dispatch_semaphore的作用除了可以加锁还可以保持线程同步。首先了解两个函数,dispatch_semaphore_signal表示信号量+1,dispatch_semaphore_wait表示如果信号量为0则进行等待,如果不为0,则继续往下执行并-1。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);

dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
dispatch_async(queue, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第一只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);
});

dispatch_async(queue, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第二只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);
});

dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
NSLog(@"熊猫妈妈在找熊猫宝宝...");

  上面的代码展示了线程同步:dispatch_semaphore_wait会一直阻塞,直到收到一次signal才继续执行下面的指令。需要注意的是,这里只wait了一次,严格来说只能保证至少有一个任务完成后才继续往下走;如果要等待所有任务都完成,应当对每个任务各调用一次dispatch_semaphore_wait。

1
2
3
4
5
2018-01-09 09:05:18.659970+0800 HotPatch[2324:47129676] 第一只熊猫宝宝向你奔来...
2018-01-09 09:05:18.659970+0800 HotPatch[2324:47129675] 第二只熊猫宝宝向你奔来...
2018-01-09 09:05:21.660470+0800 HotPatch[2324:47129676] 第一只熊猫宝宝抱住你大腿...
2018-01-09 09:05:21.660708+0800 HotPatch[2324:47129675] 第二只熊猫宝宝抱住你大腿...
2018-01-09 09:05:21.660749+0800 HotPatch[2324:47129513] 熊猫妈妈在找熊猫宝宝...

  但是一般的建议,除非万不得已,最好不要在主线程调用dispatch_semaphore_wait,毕竟阻塞主线程的事能不做还是不要做的。可以放到异步线程里进行等待:

1
2
3
4
5
dispatch_async(queue, ^{
NSLog(@"你等在原地等待被抱大腿...");
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
NSLog(@"你的大腿已经被熊猫宝宝抱住...");
});

  当然,也可以控制任务一个接着一个执行。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);

dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
dispatch_async(queue, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第一只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);
});

dispatch_async(queue, ^{
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
NSLog(@"第二只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第二只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);
});

dispatch_async(queue, ^{
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
NSLog(@"第三只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第三只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);
});

dispatch_async(queue, ^{
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
NSLog(@"熊猫妈妈在找熊猫宝宝...");
});

// 运行结果:
2018-01-09 17:33:13.467029+0800 HotPatch[96939:83871112] 第一只熊猫宝宝向你奔来...
2018-01-09 17:33:16.467211+0800 HotPatch[96939:83871112] 第一只熊猫宝宝抱住你大腿...
2018-01-09 17:33:16.467420+0800 HotPatch[96939:83871109] 第三只熊猫宝宝向你奔来...
2018-01-09 17:33:19.467450+0800 HotPatch[96939:83871109] 第三只熊猫宝宝抱住你大腿...
2018-01-09 17:33:19.467641+0800 HotPatch[96939:83871110] 第二只熊猫宝宝向你奔来...
2018-01-09 17:33:22.467738+0800 HotPatch[96939:83871110] 第二只熊猫宝宝抱住你大腿...
2018-01-09 17:33:22.467997+0800 HotPatch[96939:83871111] 熊猫妈妈在找熊猫宝宝...

  dispatch_semaphore_wait跟dispatch_semaphore_signal必须成对匹配。如果写出dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);这样的代码,而没有dispatch_semaphore_signal与之匹配,就会陷入无限等待。比如在同一个线程中,先调用了dispatch_semaphore_wait函数,如下示例:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
// 错误
dispatch_semaphore_t semaphore = dispatch_semaphore_create(1);

dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);

NSLog(@"第一只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第一只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);

// 正确
dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT, QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);

dispatch_semaphore_t semaphore = dispatch_semaphore_create(1);

dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
dispatch_async(queue, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");

[NSThread sleepForTimeInterval:3];

NSLog(@"第一只熊猫宝宝抱住你大腿...");
dispatch_semaphore_signal(semaphore);
});

2.8 dispatch_barrier

2.8.1 dispatch_barrier_async

  dispatch_barrier_async会等待前面的任务执行完毕后,阻塞后面的任务,当自己的任务执行完毕后才会继续执行后面的block任务。需要注意的是,使用这个函数的前提是你使用了自己创建的队列,如果使用dispatch_get_global_queue或者自定义的队列是串行队列则等同于dispatch_async。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);

dispatch_async(queue, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
});

dispatch_async(queue, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
});

dispatch_barrier_async(queue, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第三只熊猫宝宝抱住了你的大腿...");
});

dispatch_async(queue, ^{
NSLog(@"第四只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第四只熊猫宝宝抱住了你的大腿...");
});

// 运行结果
2018-01-16 09:08:18.239814+0800 HotPatch[91915:56702026] 第二只熊猫宝宝向你奔来...
2018-01-16 09:08:18.239828+0800 HotPatch[91915:56702034] 第一只熊猫宝宝向你奔来...
2018-01-16 09:08:23.306770+0800 HotPatch[91915:56702026] 第二只熊猫宝宝抱住了你的大腿...
2018-01-16 09:08:23.306783+0800 HotPatch[91915:56702034] 第一只熊猫宝宝抱住了你的大腿...
2018-01-16 09:08:23.307009+0800 HotPatch[91915:56702034] 第三只熊猫宝宝向你奔来...
2018-01-16 09:08:28.377030+0800 HotPatch[91915:56702034] 第三只熊猫宝宝抱住了你的大腿...
2018-01-16 09:08:28.377252+0800 HotPatch[91915:56702034] 第四只熊猫宝宝向你奔来...
2018-01-16 09:08:33.451332+0800 HotPatch[91915:56702034] 第四只熊猫宝宝抱住了你的大腿...

  看下,如果使用的不是自定义队列的效果:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);//dispatch_queue_create("com.zhaomu.test", attr_t);

dispatch_async(queue, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
});

dispatch_async(queue, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
});

dispatch_barrier_async(queue, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第三只熊猫宝宝抱住了你的大腿...");
});

dispatch_async(queue, ^{
NSLog(@"第四只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第四只熊猫宝宝抱住了你的大腿...");
});
NSLog(@"熊猫妈妈找熊猫宝宝....");

// 运行结果
2018-01-16 09:14:19.930798+0800 HotPatch[92146:56719799] 第一只熊猫宝宝向你奔来...
2018-01-16 09:14:19.930798+0800 HotPatch[92146:56719798] 第二只熊猫宝宝向你奔来...
2018-01-16 09:14:19.930798+0800 HotPatch[92146:56719706] 熊猫妈妈找熊猫宝宝....
2018-01-16 09:14:19.930807+0800 HotPatch[92146:56719797] 第三只熊猫宝宝向你奔来...
2018-01-16 09:14:19.930819+0800 HotPatch[92146:56719811] 第四只熊猫宝宝向你奔来...
2018-01-16 09:14:24.930909+0800 HotPatch[92146:56719799] 第一只熊猫宝宝抱住了你的大腿...
2018-01-16 09:14:24.930910+0800 HotPatch[92146:56719811] 第四只熊猫宝宝抱住了你的大腿...
2018-01-16 09:14:24.930910+0800 HotPatch[92146:56719797] 第三只熊猫宝宝抱住了你的大腿...
2018-01-16 09:14:24.930909+0800 HotPatch[92146:56719798] 第二只熊猫宝宝抱住了你的大腿...

2.8.2 dispatch_barrier_sync

  这个函数可以起到阻塞线程的作用,同样的,前提是必须使用自定义的并行队列,串行队列也不可以,不然等同于dispatch_sync。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
    dispatch_queue_attr_t attr_t = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,  QOS_CLASS_UTILITY, QOS_MIN_RELATIVE_PRIORITY);
dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", attr_t);

dispatch_async(queue, ^{
NSLog(@"第一只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
});

dispatch_async(queue, ^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
});

dispatch_barrier_sync(queue, ^{
NSLog(@"第三只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第三只熊猫宝宝抱住了你的大腿...");
});

dispatch_async(queue, ^{
NSLog(@"第四只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第四只熊猫宝宝抱住了你的大腿...");
});
NSLog(@"熊猫妈妈找熊猫宝宝....");

// 运行结果,可以看到主线程也被阻塞了
2018-01-16 09:16:55.716809+0800 HotPatch[92335:56726566] 第一只熊猫宝宝向你奔来...
2018-01-16 09:16:55.716792+0800 HotPatch[92335:56726568] 第二只熊猫宝宝向你奔来...
2018-01-16 09:17:00.784030+0800 HotPatch[92335:56726568] 第二只熊猫宝宝抱住了你的大腿...
2018-01-16 09:17:00.784030+0800 HotPatch[92335:56726566] 第一只熊猫宝宝抱住了你的大腿...
2018-01-16 09:17:00.784281+0800 HotPatch[92335:56726517] 第三只熊猫宝宝向你奔来...
2018-01-16 09:17:05.785264+0800 HotPatch[92335:56726517] 第三只熊猫宝宝抱住了你的大腿...
2018-01-16 09:17:05.785450+0800 HotPatch[92335:56726517] 熊猫妈妈找熊猫宝宝....
2018-01-16 09:17:05.785495+0800 HotPatch[92335:56726569] 第四只熊猫宝宝向你奔来...
2018-01-16 09:17:10.850254+0800 HotPatch[92335:56726569] 第四只熊猫宝宝抱住了你的大腿...

2.9 dispatch_source

  通过dispatch_source_create创建一个对象。

1
dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, unsigned long mask, dispatch_queue_t queue);

  第一个参数是一个dispatch_source类型,提供的类型如下:

1
2
3
4
5
6
7
8
9
10
11
DISPATCH_SOURCE_TYPE_DATA_ADD
DISPATCH_SOURCE_TYPE_DATA_OR
DISPATCH_SOURCE_TYPE_MACH_RECV
DISPATCH_SOURCE_TYPE_MACH_SEND
DISPATCH_SOURCE_TYPE_PROC
DISPATCH_SOURCE_TYPE_READ
DISPATCH_SOURCE_TYPE_SIGNAL
DISPATCH_SOURCE_TYPE_TIMER
DISPATCH_SOURCE_TYPE_VNODE
DISPATCH_SOURCE_TYPE_WRITE
DISPATCH_SOURCE_TYPE_MEMORYPRESSURE

  之前的例子中讲过一个定时器,它在dispatch_source中可能算是比较常用的一种。其他具体使用场景,暂未接触到,如果有更好的场景,后面再加进来。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
// Sets up a repeating GCD timer source that fires every 5 seconds on a
// custom concurrent queue. The source must be stored in a property
// (self.source) because a dispatch source that is deallocated — or never
// resumed — will not fire.
- (void)viewDidLoad {
    [super viewDidLoad];

    dispatch_queue_t queue = dispatch_queue_create("com.zhaomu.test", DISPATCH_QUEUE_CONCURRENT);

    self.source = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue);
    // Fire immediately, then every 5 seconds, with zero leeway.
    dispatch_source_set_timer(self.source, dispatch_walltime(NULL, 0), 5ull * NSEC_PER_SEC, 0);
    // Capture self WEAKLY in the handlers: self retains self.source and the
    // source retains its handler blocks, so a strong capture of self here
    // would form a retain cycle and keep this controller alive forever.
    __weak typeof(self) weakSelf = self;
    dispatch_source_set_event_handler(self.source, ^{
        [weakSelf testSelector];
    });
    dispatch_source_set_cancel_handler(self.source, ^{
        // Runs once after dispatch_source_cancel; log something meaningful
        // instead of an empty string.
        NSLog(@"timer source cancelled");
    });
    // Sources are created in a suspended state; resume to start the timer.
    dispatch_resume(self.source);
}

// Invoked by the repeating dispatch_source timer (every 5 seconds).
- (void)testSelector {
NSLog(@"一只熊猫宝宝向您奔来....");
}


3. NSOperation

  NSOperation对象一般通过其子类NSInvocationOperation和NSBlockOperation来创建。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
// Demonstrates the two usual ways to create NSOperation objects
// (NSInvocationOperation and NSBlockOperation) and run them on an
// NSOperationQueue.
- (void)viewDidLoad {
[super viewDidLoad];

NSOperationQueue *queue = [[NSOperationQueue alloc] init];
queue.name = @"com.zhaomu.test";
// Selector-based operation: executes [self test1] when it runs.
NSInvocationOperation *op1 = [[NSInvocationOperation alloc] initWithTarget:self selector:@selector(test1) object:nil];
// Block-based operation.
NSBlockOperation *op2 = [NSBlockOperation blockOperationWithBlock:^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
}];

// Adding operations to the queue starts them automatically.
[queue addOperation:op1];
[queue addOperation:op2];

// Alternatively, run the operations manually without a queue:
// [op1 start];
// [op2 start];
}

// Target of op1 (NSInvocationOperation): logs, sleeps 5 seconds to simulate
// a long-running task, then logs again.
- (void)test1 {
NSLog(@"第一只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
}

// 运行结果
2018-01-16 10:27:55.292137+0800 HotPatch[93798:56833792] 第一只熊猫宝宝向你奔来...
2018-01-16 10:27:55.292138+0800 HotPatch[93798:56833794] 第二只熊猫宝宝向你奔来...
2018-01-16 10:28:00.297201+0800 HotPatch[93798:56833792] 第一只熊猫宝宝抱住了你的大腿...
2018-01-16 10:28:00.297202+0800 HotPatch[93798:56833794] 第二只熊猫宝宝抱住了你的大腿...

  需要特别说一下的是一个NSBlockOperation对象可以不断添加任务,其任务之间是并行执行的。

1
2
3
4
5
6
7
8
9
10
11
NSBlockOperation *op2 = [NSBlockOperation blockOperationWithBlock:^{
NSLog(@"第二只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
}];

[op2 addExecutionBlock:^{
NSLog(@"第四只熊猫宝宝向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第四只熊猫宝宝抱住了你的大腿...");
}];

  当然也可以设置任务优先级:通过queuePriority和qualityOfService设置任务的优先级;而且也可以通过qualityOfService设置队列的优先级。
  那么相比于GCD,NSOperation有哪些优点呢?

  • 设置最大并发数: maxConcurrentOperationCount

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
        NSOperationQueue *queue = [[NSOperationQueue alloc] init];
    queue.name = @"com.zhaomu.test";
    queue.maxConcurrentOperationCount = 2;

    NSBlockOperation *op1 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第一只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
    }];
    NSBlockOperation *op2 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第二只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
    }];

    NSBlockOperation *op3 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第三只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第三只熊猫宝宝抱住了你的大腿...");
    }];

    [queue addOperation:op1];
    [queue addOperation:op2];
    [queue addOperation:op3];

    // 运行结果
    2018-01-16 10:31:10.979448+0800 HotPatch[93888:56839976] 第一只熊猫宝宝向你奔来...
    2018-01-16 10:31:10.979449+0800 HotPatch[93888:56839975] 第二只熊猫宝宝向你奔来...
    2018-01-16 10:31:15.981534+0800 HotPatch[93888:56839976] 第一只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:31:15.981534+0800 HotPatch[93888:56839975] 第二只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:31:15.981721+0800 HotPatch[93888:56839983] 第三只熊猫宝宝向你奔来...
    2018-01-16 10:31:20.985040+0800 HotPatch[93888:56839983] 第三只熊猫宝宝抱住了你的大腿...
  • 更方便的设置依赖: addDependency

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
        NSOperationQueue *queue = [[NSOperationQueue alloc] init];
    queue.name = @"com.zhaomu.test";

    NSBlockOperation *op1 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第一只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
    }];
    NSBlockOperation *op2 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第二只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
    }];

    NSBlockOperation *op3 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第三只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第三只熊猫宝宝抱住了你的大腿...");
    }];

    [op2 addDependency:op1];
    [op3 addDependency:op2];

    [queue addOperation:op1];
    [queue addOperation:op2];
    [queue addOperation:op3];

    // 运行结果
    2018-01-16 10:33:33.063585+0800 HotPatch[93949:56842708] 第一只熊猫宝宝向你奔来...
    2018-01-16 10:33:38.066227+0800 HotPatch[93949:56842708] 第一只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:33:38.066432+0800 HotPatch[93949:56842705] 第二只熊猫宝宝向你奔来...
    2018-01-16 10:33:43.066665+0800 HotPatch[93949:56842705] 第二只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:33:43.066902+0800 HotPatch[93949:56842705] 第三只熊猫宝宝向你奔来...
    2018-01-16 10:33:48.071874+0800 HotPatch[93949:56842705] 第三只熊猫宝宝抱住了你的大腿...
  • 直接取消队列内的所有任务: cancelAllOperations

  • 阻塞线程等待队列任务全部完成:waitUntilAllOperationsAreFinished
  • 更直观的看到当前队列有多少任务: operationCount
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
        NSOperationQueue *queue = [[NSOperationQueue alloc] init];
    queue.name = @"com.zhaomu.test";

    NSBlockOperation *op1 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第一只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第一只熊猫宝宝抱住了你的大腿...");
    }];
    NSBlockOperation *op2 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第二只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第二只熊猫宝宝抱住了你的大腿...");
    }];

    NSBlockOperation *op3 = [NSBlockOperation blockOperationWithBlock:^{
    NSLog(@"第三只熊猫宝宝向你奔来...");
    [NSThread sleepForTimeInterval:5];
    NSLog(@"第三只熊猫宝宝抱住了你的大腿...");
    }];

    [queue addOperation:op1];
    [queue addOperation:op2];
    [queue addOperation:op3];

    [queue waitUntilAllOperationsAreFinished];
    NSLog(@"熊猫妈妈正在找熊猫宝宝...");

    // 运行结果:
    2018-01-16 10:46:02.731265+0800 HotPatch[94363:56860859] 第三只熊猫宝宝向你奔来...
    2018-01-16 10:46:02.731265+0800 HotPatch[94363:56860858] 第二只熊猫宝宝向你奔来...
    2018-01-16 10:46:02.731265+0800 HotPatch[94363:56860865] 第一只熊猫宝宝向你奔来...
    2018-01-16 10:46:07.735805+0800 HotPatch[94363:56860859] 第三只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:46:07.735805+0800 HotPatch[94363:56860865] 第一只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:46:07.735813+0800 HotPatch[94363:56860858] 第二只熊猫宝宝抱住了你的大腿...
    2018-01-16 10:46:07.736033+0800 HotPatch[94363:56860808] 熊猫妈妈正在找熊猫宝宝...

4. pthread

  pthread更接近底层,这里只做简单的了解即可。代码如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
// Minimal pthread demo: create a thread, wait for it with pthread_join,
// then launch a second, detached thread.
- (void)viewDidLoad {
    [super viewDidLoad];

    pthread_t thread1;
    pthread_t thread2;
    pthread_create(&thread1, NULL, test, (__bridge void *)@"熊猫宝宝跑出来拉");
    // Block until thread1's task finishes. Joining also reclaims the
    // thread's resources, so thread1 must NOT be detached afterwards:
    // per POSIX, calling pthread_detach on an already-joined thread ID
    // is undefined behavior (the original code did exactly that).
    pthread_join(thread1, NULL);
    pthread_create(&thread2, NULL, test2, NULL);
    // Detach thread2 so its resources are released automatically when it exits.
    pthread_detach(thread2);
}

// Entry point for thread1. `data` is the NSString passed through
// pthread_create, bridged back with __bridge (no ownership transfer).
void *test(void *data) {
NSLog(@"%@", (__bridge NSString *)data);
NSLog(@"第一只小熊猫向你奔来...");
[NSThread sleepForTimeInterval:5];
NSLog(@"第一只小熊猫抱住你大腿...");
return NULL;
}

// Entry point for thread2: simulates 3 seconds of work. `data` is unused.
void *test2(void *data) {
NSLog(@"第二只小熊猫向你奔来...");
[NSThread sleepForTimeInterval:3];
NSLog(@"第二只小熊猫抱住你大腿...");
return NULL;
}

// 运行结果:
2018-02-07 17:31:20.203050+0800 HotPatch[84419:80030841] 熊猫宝宝跑出来拉
2018-02-07 17:31:20.203215+0800 HotPatch[84419:80030841] 第一只小熊猫向你奔来...
2018-02-07 17:31:25.206790+0800 HotPatch[84419:80030841] 第一只小熊猫抱住你大腿...
2018-02-07 17:31:25.207014+0800 HotPatch[84419:80030951] 第二只小熊猫向你奔来...
2018-02-07 17:31:28.211189+0800 HotPatch[84419:80030951] 第二只小熊猫抱住你大腿...

  如何使用多线程,至此结束,下一篇将要看一看GCD的源码。

12

朝暮

联系方式:leylfl@foxmail.com

17 日志
3 分类
© 2018 朝暮
由 Hexo 强力驱动
|
主题 — NexT.Muse v5.1.4