{"id":1923,"date":"2023-11-21T10:33:55","date_gmt":"2023-11-21T02:33:55","guid":{"rendered":"http:\/\/zhang.mba\/?p=1923"},"modified":"2023-11-21T10:38:35","modified_gmt":"2023-11-21T02:38:35","slug":"spacy-yong-yu-wen-ben-fen-xi-shi-ti-shi-bie-yi-cun","status":"publish","type":"post","link":"https:\/\/zhang.mba\/index.php\/2023\/11\/21\/10\/33\/55\/1923\/spacy-yong-yu-wen-ben-fen-xi-shi-ti-shi-bie-yi-cun\/python\/zhangzhiqi\/","title":{"rendered":"SpaCy\uff0c\u7528\u4e8e\u6587\u672c\u5206\u6790\u3001\u5b9e\u4f53\u8bc6\u522b\u3001\u4f9d\u5b58\u5206\u6790"},"content":{"rendered":"<p>SpaCy \u662f\u4e00\u4e2a\u5f00\u6e90\u7684 Python \u5e93\uff0c\u4e13\u4e3a\u9ad8\u7ea7\u81ea\u7136\u8bed\u8a00\u5904\u7406\u4efb\u52a1\u800c\u8bbe\u8ba1\uff0c\u4f8b\u5982<strong>\u6587\u672c\u5206\u6790\u3001\u5b9e\u4f53\u8bc6\u522b\u3001\u4f9d\u5b58\u5206\u6790<\/strong>\u7b49\u3002<\/p>\n<p>SpaCy \u6ce8\u91cd\u6027\u80fd\u548c\u6613\u7528\u6027\uff0c\u4e3a\u5404\u79cd NLP \u4efb\u52a1\u63d0\u4f9b\u7edf\u4e00\u7684 API\uff0c\u4f7f\u5176\u6210\u4e3a\u6587\u672c\u5904\u7406\u9886\u57df\u7684\u521d\u5b66\u8005\u548c\u4e13\u5bb6\u7684\u7406\u60f3\u5de5\u5177\u3002<\/p>\n<p>SpaCy \u7684\u4e00\u4e9b\u4e3b\u8981\u529f\u80fd\u5305\u62ec\u3002<\/p>\n<ul>\n<li><strong>\u9ad8\u6027\u80fd<\/strong>\uff1aSpaCy \u4e13\u4e3a\u901f\u5ea6\u548c\u6548\u7387\u800c\u8bbe\u8ba1\uff0c\u4f7f\u4f60\u80fd\u591f\u5feb\u901f\u8f7b\u677e\u5730\u5904\u7406\u548c\u5206\u6790\u5927\u91cf\u6587\u672c\u3002<\/li>\n<li><strong>\u6613\u4e8e\u4f7f\u7528<\/strong>\uff1a\u901a\u8fc7\u76f4\u89c2\u7684 API \u548c\u5168\u9762\u7684\u6587\u6863\uff0cSpaCy \u7b80\u5316\u4e86\u590d\u6742\u7684 NLP \u4efb\u52a1\uff0c\u5e76\u4e3a\u6240\u6709\u6280\u80fd\u6c34\u5e73\u7684\u7528\u6237\u63d0\u4f9b\u4e86\u5e73\u6ed1\u7684\u5b66\u4e60\u66f2\u7ebf\u3002<\/li>\n<li><strong>\u53ef\u5b9a\u5236\u6027<\/strong>\uff1aSpaCy 
\u5141\u8bb8\u521b\u5efa\u81ea\u5b9a\u4e49\u7ba1\u9053\u548c\u6269\u5c55\uff0c\u4f7f\u4f60\u80fd\u591f\u6839\u636e\u4f60\u7684\u7279\u5b9a\u9700\u6c42\u548c\u8981\u6c42\u5b9a\u5236\u5e93\u3002<\/li>\n<li><strong>\u4e0e\u5176\u4ed6\u5e93\u96c6\u6210<\/strong>\uff1aSpaCy \u53ef\u4ee5\u8f7b\u677e\u4e0e\u5176\u4ed6\u6d41\u884c\u7684 Python \u5e93\uff08\u4f8b\u5982 TensorFlow\u3001PyTorch \u548c scikit-learn\uff09\u96c6\u6210\uff0c\u8fdb\u4e00\u6b65\u6269\u5c55\u5176\u529f\u80fd\u548c\u5e94\u7528\u3002<\/li>\n<\/ul>\n<h3><a id=\"%E7%89%B9%E7%82%B9\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u7279\u70b9<\/h3>\n<p><strong>\u5b83\u5177\u6709\u5982\u4e0b\u7279\u70b9\u3002<\/strong><\/p>\n<ul>\n<li>\u652f\u6301 70 \u591a\u79cd\u8bed\u8a00<\/li>\n<li>\u9488\u5bf9\u4e0d\u540c\u8bed\u8a00\u548c\u4efb\u52a1\u7684\u8bad\u7ec3\u7ba1\u9053<\/li>\n<li>\u4f7f\u7528 BERT \u7b49\u9884\u8bad\u7ec3 Transformer \u8fdb\u884c\u591a\u4efb\u52a1\u5b66\u4e60<\/li>\n<li>\u652f\u6301\u9884\u8bad\u7ec3\u7684\u8bcd\u5411\u91cf\u548c\u5d4c\u5165<\/li>\n<li>\u6700\u5148\u8fdb\u7684\u901f\u5ea6<\/li>\n<li>\u7528\u4e8e\u547d\u540d\u5b9e\u4f53\u8bc6\u522b\u3001\u8bcd\u6027\u6807\u8bb0\u3001\u4f9d\u5b58\u5206\u6790\u3001\u53e5\u5b50\u5206\u5272\u3001\u6587\u672c\u5206\u7c7b\u3001\u8bcd\u5f62\u8fd8\u539f\u3001\u5f62\u6001\u5206\u6790\u3001\u5b9e\u4f53\u94fe\u63a5\u7b49\u7684\u7ec4\u4ef6<\/li>\n<li>\u53ef\u901a\u8fc7\u81ea\u5b9a\u4e49\u7ec4\u4ef6\u548c\u5c5e\u6027\u8f7b\u677e\u6269\u5c55<\/li>\n<li>\u652f\u6301 PyTorch\u3001TensorFlow \u548c\u5176\u4ed6\u6846\u67b6\u4e2d\u7684\u81ea\u5b9a\u4e49\u6a21\u578b<\/li>\n<li>\u5185\u7f6e\u8bed\u6cd5\u548c NER 
\u53ef\u89c6\u5316\u5de5\u5177<\/li>\n<li>\u8f7b\u677e\u7684\u6a21\u578b\u6253\u5305\u3001\u90e8\u7f72\u548c\u5de5\u4f5c\u6d41\u7a0b\u7ba1\u7406<\/li>\n<li>\u7a33\u5065\u4e14\u7ecf\u8fc7\u4e25\u683c\u8bc4\u4f30\u7684\u51c6\u786e\u6027<\/li>\n<\/ul>\n<p><strong>\u8001\u89c4\u77e9\uff1a\u5982\u679c\u89c9\u5f97\u6587\u7ae0\u4e0d\u9519\uff01\u6b22\u8fce\u5927\u5bb6\u70b9\u8d5e\u3001\u8f6c\u53d1\u5b89\u6392\u8d77\u6765\u3002<\/strong><\/p>\n<h3><a id=\"%E5%88%9D%E4%BD%93%E9%AA%8C\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u521d\u4f53\u9a8c<\/h3>\n<h4><a id=\"%E5%BA%93%E7%9A%84%E5%AE%89%E8%A3%85\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u5e93\u7684\u5b89\u88c5<\/h4>\n<p>\u8981\u5f00\u59cb\u4f7f\u7528 SpaCy\uff0c\u4f60\u9700\u8981\u5b89\u88c5\u8be5\u5e93\u53ca\u5176\u4f9d\u8d56\u9879\u3002\u4f60\u53ef\u4ee5\u4f7f\u7528 pip \u6267\u884c\u6b64\u64cd\u4f5c\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">pip install spacy\r\n<\/code><\/pre>\n<p>\u5b89\u88c5\u5b8c\u6210\u540e\uff0c\u4f60\u8fd8\u53ef\u4ee5\u4e0b\u8f7d\u6240\u9700\u8bed\u8a00\u7684\u9884\u8bad\u7ec3\u6a21\u578b\u3002\u4f8b\u5982\uff0c\u8981\u4e0b\u8f7d\u82f1\u6587\u6a21\u578b\uff0c\u8bf7\u8fd0\u884c\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">python -m spacy download en_core_web_sm\r\n<\/code><\/pre>\n<h4><a id=\"%E5%9F%BA%E6%9C%AC%E6%96%87%E6%9C%AC%E5%A4%84%E7%90%86%E6%8A%80%E6%9C%AF\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u57fa\u672c\u6587\u672c\u5904\u7406\u6280\u672f<\/h4>\n<p>\u5728\u672c\u8282\u4e2d\uff0c\u6211\u4eec\u5c06\u63a2\u8ba8\u4e00\u4e9b\u57fa\u672c\u7684\u6587\u672c\u5904\u7406\u6280\u672f\uff0c\u8fd9\u4e9b\u6280\u672f\u6784\u6210\u4e86\u4efb\u4f55 NLP 
\u9879\u76ee\u7684\u57fa\u7840\u3002<\/p>\n<p><strong>\u6807\u8bb0\u5316<\/strong><\/p>\n<p>\u6807\u8bb0\u5316\u662f\u5c06\u6587\u672c\u5206\u89e3\u4e3a\u5355\u4e2a\u6807\u8bb0\uff08\u4f8b\u5982\u5355\u8bcd\u3001\u53e5\u5b50\u6216\u77ed\u8bed\uff09\u7684\u8fc7\u7a0b\u3002\u4f7f\u7528 SpaCy\uff0c\u53ea\u9700\u51e0\u884c\u4ee3\u7801\u5373\u53ef\u5b9e\u73b0\u6807\u8bb0\u5316\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">import spacy\r\nnlp = spacy.load(\"en_core_web_sm\")\r\ntext = \"This is a sample sentence.\"\r\ndoc = nlp(text)\r\nfor token in doc:\r\n    print(token.text)\r\n<\/code><\/pre>\n<h5><a id=\"%E8%AF%8D%E5%BD%A2%E8%BF%98%E5%8E%9F\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u8bcd\u5f62\u8fd8\u539f<\/h5>\n<p>\u8bcd\u5f62\u8fd8\u539f\u662f\u5c06\u5355\u8bcd\u8fd8\u539f\u4e3a\u5176\u57fa\u672c\u5f62\u5f0f\u7684\u8fc7\u7a0b\u3002<\/p>\n<p>\u8fd9\u6709\u52a9\u4e8e\u89c4\u8303\u6587\u672c\u5e76\u5408\u5e76\u76f8\u4f3c\u7684\u5355\u8bcd\u4ee5\u8fdb\u884c\u8fdb\u4e00\u6b65\u5206\u6790\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">for token in doc:\r\n    print(token.text, token.lemma_)\r\n<\/code><\/pre>\n<h5><a id=\"%E8%AF%8D%E6%80%A7%E6%A0%87%E6%B3%A8\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u8bcd\u6027\u6807\u6ce8<\/h5>\n<p>\u8bcd\u6027 (POS) \u6807\u8bb0\u6d89\u53ca\u4e3a\u6587\u672c\u4e2d\u7684\u6bcf\u4e2a\u6807\u8bb0\u5206\u914d\u8bed\u6cd5\u7c7b\u522b\uff0c\u4f8b\u5982\u540d\u8bcd\u3001\u52a8\u8bcd\u6216\u5f62\u5bb9\u8bcd\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">for token in doc:\r\n    print(token.text, token.pos_)\r\n<\/code><\/pre>\n<h5><a id=\"%E5%91%BD%E5%90%8D%E5%AE%9E%E4%BD%93%E8%AF%86%E5%88%AB\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u547d\u540d\u5b9e\u4f53\u8bc6\u522b<\/h5>\n<p>\u547d\u540d\u5b9e\u4f53\u8bc6\u522b (NER) 
\u662f\u5bf9\u6587\u672c\u4e2d\u7684\u547d\u540d\u5b9e\u4f53\uff08\u4f8b\u5982\u4eba\u5458\u3001\u7ec4\u7ec7\u6216\u4f4d\u7f6e\uff09\u8fdb\u884c\u8bc6\u522b\u548c\u5206\u7c7b\u7684\u8fc7\u7a0b\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">for ent in doc.ents:\r\n    print(ent.text, ent.label_)\r\n<\/code><\/pre>\n<h4><a id=\"%E6%96%87%E6%9C%AC%E5%A4%84%E7%90%86%E6%8A%80%E6%9C%AF%E8%BF%9B%E9%98%B6\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u6587\u672c\u5904\u7406\u6280\u672f\u8fdb\u9636<\/h4>\n<p>\u5728\u672c\u8282\u4e2d\uff0c\u6211\u4eec\u5c06\u6df1\u5165\u7814\u7a76\u66f4\u5148\u8fdb\u7684\u6587\u672c\u5904\u7406\u6280\u672f\uff0c\u4ee5\u4fbf\u66f4\u6df1\u5165\u5730\u5206\u6790\u548c\u7406\u89e3\u6587\u672c\u6570\u636e\u3002<\/p>\n<h5><a id=\"%E6%96%87%E6%9C%AC%E5%88%86%E7%B1%BB\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u6587\u672c\u5206\u7c7b<\/h5>\n<p>\u6587\u672c\u5206\u7c7b\u662f\u6839\u636e\u6587\u672c\u5185\u5bb9\u5c06\u6587\u672c\u5206\u7c7b\u4e3a\u9884\u5b9a\u4e49\u7c7b\u522b\u7684\u4efb\u52a1\u3002<\/p>\n<p>\u4f7f\u7528 SpaCy\uff0c\u4f60\u53ef\u4ee5\u8bad\u7ec3\u81ea\u5b9a\u4e49\u6587\u672c\u5206\u7c7b\u5668\u6765\u6267\u884c\u60c5\u611f\u5206\u6790\u3001\u4e3b\u9898\u5206\u7c7b\u6216\u5783\u573e\u90ae\u4ef6\u68c0\u6d4b\u7b49\u4efb\u52a1\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">import spacy\r\nimport random\r\nfrom spacy.training.example import Example\r\n\r\n# Load the pre-trained model\r\nnlp = spacy.load(\"en_core_web_sm\")\r\n\r\n# Create a blank TextCategorizer with the \"textcat\" name\r\nif \"textcat\" not in nlp.pipe_names:\r\n    textcat = nlp.create_pipe(\"textcat\", config={\"exclusive_classes\": True, \"architecture\": \"simple_cnn\"})\r\n    nlp.add_pipe(textcat, last=True)\r\nelse:\r\n    textcat = nlp.get_pipe(\"textcat\")\r\n\r\n# Add labels (categories) to the text classifier\r\ntextcat.add_label(\"LABEL_1\")\r\ntextcat.add_label(\"LABEL_2\")\r\n# Add more labels as 
needed\r\n\r\n# Prepare the training data\r\ntrain_data = [(\"Text example 1\", {\"cats\": {\"LABEL_1\": 1, \"LABEL_2\": 0}}),\r\n             (\"Text example 2\", {\"cats\": {\"LABEL_1\": 0, \"LABEL_2\": 1}}),\r\n             # Add more training examples with their corresponding labels\r\n            ]\r\n\r\n# Training loop\r\nrandom.seed(1)\r\nspacy.util.fix_random_seed(1)\r\noptimizer = nlp.begin_training()\r\n\r\nfor epoch in range(10):  # You can adjust the number of epochs\r\n    random.shuffle(train_data)\r\n    losses = {}\r\n    # Batch the training data\r\n    for batch in spacy.util.minibatch(train_data, size=2):\r\n        texts, annotations = zip(*batch)\r\n        example = []\r\n        # Update the model with iterating each text\r\n        for i in range(len(texts)):\r\n            doc = nlp.make_doc(texts[i])\r\n            example.append(Example.from_dict(doc, annotations[i]))\r\n        nlp.update(example, drop=0.5, losses=losses)\r\n    print(losses)\r\n\r\n# Save the trained model to a file\r\nnlp.to_disk(\"custom_model\")\r\n\r\n# Test the trained model\r\ntest_text = \"This is a test text.\"\r\ndoc = nlp(test_text)\r\nprint(\"Predicted categories:\", doc.cats)\r\n<\/code><\/pre>\n<h5><a id=\"%E6%96%87%E6%9C%AC%E6%8F%90%E5%8F%96\" class=\"anchor\" aria-hidden=\"true\"><\/a>\u6587\u672c\u63d0\u53d6<\/h5>\n<p>\u6587\u672c\u63d0\u53d6\u6d89\u53ca\u4ece\u975e\u7ed3\u6784\u5316\u6587\u672c\u4e2d\u63d0\u53d6\u7279\u5b9a\u4fe1\u606f\u6216\u6a21\u5f0f\u3002<\/p>\n<p>SpaCy \u5f3a\u5927\u7684 NER \u529f\u80fd\u53ef\u4ee5\u6269\u5c55\u4ee5\u63d0\u53d6\u81ea\u5b9a\u4e49\u5b9e\u4f53\u548c\u6a21\u5f0f\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">import spacy\r\nfrom spacy.matcher import Matcher\r\nfrom spacy.tokens import Doc, Span\r\n\r\n# Load a SpaCy model (you can use a pre-trained model or a blank one)\r\nnlp = spacy.load(\"en_core_web_sm\")\r\n\r\n# Define a custom entity type\r\nCustomEntity = 
nlp.vocab.strings.add(\"CUSTOM_ENTITY\")\r\n\r\n# Create a custom component to add the entity to the Doc\r\ndef add_custom_entity_to_doc(doc, start, end, label):\r\n    entity = Span(doc, start, end, label=label)\r\n    doc.ents += (entity,)\r\n    return doc\r\n\r\n# Example: Let's say you want to extract \"OpenAI\" as a custom entity\r\nmatcher = Matcher(nlp.vocab)\r\nmatcher.add(\"CustomEntityPattern\", None, [{\"LOWER\": \"openai\"}])\r\n\r\n# Custom function to handle matched patterns and add custom entities\r\ndef custom_entity_matcher(doc, matcher, custom_entity_type):\r\n    matches = matcher(doc)\r\n    spans = [doc[start:end] for match_id, start, end in matches]\r\n    for span in spans:\r\n        doc = add_custom_entity_to_doc(doc, span.start, span.end, custom_entity_type)\r\n    return doc\r\n\r\n# Add the custom entity matcher to the pipeline\r\nnlp.add_pipe(custom_entity_matcher, last=True, config={\"custom_entity_type\": CustomEntity})\r\n\r\n# Process a text document\r\ntext = \"OpenAI is an AI research lab. 
openai develops advanced AI models.\"\r\ndoc = nlp(text)\r\n\r\n# Iterate through entities and print them\r\nfor ent in doc.ents:\r\n    print(f\"Entity: {ent.text}, Label: {ent.label_}\")\r\n\r\n# Output will show the custom entity \"OpenAI\" as extracted\r\n<\/code><\/pre>\n<h4><a id=\"spacy%E4%B8%8E%E5%85%B6%E4%BB%96%E5%BA%93%E9%9B%86%E6%88%90\" class=\"anchor\" aria-hidden=\"true\"><\/a>SpaCy \u4e0e\u5176\u4ed6\u5e93\u96c6\u6210<\/h4>\n<p>SpaCy \u53ef\u4ee5\u8f7b\u677e\u4e0e\u5176\u4ed6\u6d41\u884c\u7684 Python \u5e93\uff08\u4f8b\u5982 TensorFlow\u3001PyTorch \u548c scikit-learn\uff09\u96c6\u6210\uff0c\u4ee5\u6269\u5c55\u5176\u529f\u80fd\u548c\u5e94\u7528\u7a0b\u5e8f\u3002<\/p>\n<pre class=\"line-numbers\"><code class=\"language-python\">import spacy\r\nimport nltk\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\n\r\n# Load SpaCy model and NLTK sentiment analyzer\r\nnlp = spacy.load(\"en_core_web_sm\")\r\nnltk.download(\"vader_lexicon\")\r\nsid = SentimentIntensityAnalyzer()\r\n\r\n# Text to analyze\r\ntext = \"SpaCy and NLTK integration is great! 
I love working with both.\"\r\n\r\n# SpaCy for tokenization and part-of-speech tagging\r\ndoc = nlp(text)\r\n\r\n# Tokenize and perform part-of-speech tagging with SpaCy\r\ntokens = [token.text for token in doc]\r\npos_tags = [token.pos_ for token in doc]\r\n\r\nprint(\"Tokens:\", tokens)\r\nprint(\"Part-of-Speech Tags:\", pos_tags)\r\n\r\n# NLTK for sentiment analysis\r\n# Note: NLTK's sentiment analysis is not built-in, so we use the VADER sentiment analyzer\r\nsentiment = sid.polarity_scores(text)\r\n\r\nprint(\"Sentiment Analysis Results:\")\r\nfor key, value in sentiment.items():\r\n    print(f\"{key}: {value}\")\r\n\r\n# You can integrate and use other libraries, such as scikit-learn for machine learning or other NLP libraries, as needed for your project.\r\n<\/code><\/pre>\n<p>\u901a\u8fc7 SpaCy \u638c\u63e1 NLP \u7684\u5f3a\u5927\u529f\u80fd\uff0c\u4f60\u53ef\u4ee5\u91ca\u653e\u6587\u672c\u5206\u6790\u548c\u8bed\u8a00\u5904\u7406\u7684\u6f5c\u529b\uff0c\u63d0\u4f9b\u6709\u4ef7\u503c\u7684\u89c1\u89e3\u548c\u81ea\u52a8\u5316\u529f\u80fd\u3002<\/p>\n<p>\u6765\u6e90&#8212;&#8211;\u5c0f\u5bd2<\/p>\n<!--CusAds0-->\n<div style=\"font-size: 0px; height: 0px; line-height: 0px; margin: 0; padding: 0; clear: both;\"><\/div>","protected":false},"excerpt":{"rendered":"<p>SpaCy \u662f\u4e00\u4e2a\u5f00\u6e90\u7684 Python 
\u5e93\uff0c\u4e13\u4e3a\u9ad8\u7ea7\u81ea\u7136\u8bed\u8a00\u5904\u7406\u4efb\u52a1\u800c\u8bbe\u8ba1\uff0c\u4f8b\u5982\u6587\u672c\u5206\u6790\u3001\u5b9e\u4f53\u8bc6\u522b\u3001\u4f9d\u5b58\u5206\u6790\u7b49\u3002<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":{"_bbp_topic_count":0,"_bbp_reply_count":0,"_bbp_total_topic_count":0,"_bbp_total_reply_count":0,"_bbp_voice_count":0,"_bbp_anonymous_reply_count":0,"_bbp_topic_count_hidden":0,"_bbp_reply_count_hidden":0,"_bbp_forum_subforum_count":0},"categories":[12,70],"tags":[54],"_links":{"self":[{"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/posts\/1923"}],"collection":[{"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/comments?post=1923"}],"version-history":[{"count":0,"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/posts\/1923\/revisions"}],"wp:attachment":[{"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/media?parent=1923"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/categories?post=1923"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/zhang.mba\/index.php\/wp-json\/wp\/v2\/tags?post=1923"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}