{"id":1062,"date":"2024-12-06T11:46:22","date_gmt":"2024-12-06T02:46:22","guid":{"rendered":"https:\/\/www.aicritique.org\/us\/?post_type=explainable&#038;p=1062"},"modified":"2024-12-06T11:46:22","modified_gmt":"2024-12-06T02:46:22","slug":"ibm","status":"publish","type":"explainable","link":"https:\/\/www.aicritique.org\/us\/explainable\/ibm\/","title":{"rendered":"IBM"},"content":{"rendered":"\n<ul class=\"wp-block-list\">\n<li><strong>IBM Watson OpenScale<\/strong>: Provides real-time monitoring, fairness checks, and explanations of AI models to ensure transparency and accountability.<\/li>\n\n\n\n<li><strong>AIX360 (AI Explainability 360)<\/strong>: An open-source library providing diverse methods for explaining AI models, including local and global explanations.<\/li>\n\n\n\n<li><strong>IBM Cloud Pak for Data<\/strong>: Includes features that enhance the explainability of AI models, supporting transparency and understanding in business decision-making.<\/li>\n\n\n\n<li><strong>AI Fairness 360 Toolkit (AIF360)<\/strong>: Helps in detecting and mitigating bias in AI models, promoting fair and transparent AI practices.<\/li>\n\n\n\n<li><strong>Watson Studio with Explainable AI (Healthcare)<\/strong>: Offers explainability features that highlight the reasons behind predictions, aiding in transparency and trust.<\/li>\n<\/ul>\n\n\n\n<p><\/p>\n","protected":false},"featured_media":0,"template":"","class_list":["post-1062","explainable","type-explainable","status-publish","hentry"],"_links":{"self":[{"href":"https:\/\/www.aicritique.org\/us\/wp-json\/wp\/v2\/explainable\/1062","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.aicritique.org\/us\/wp-json\/wp\/v2\/explainable"}],"about":[{"href":"https:\/\/www.aicritique.org\/us\/wp-json\/wp\/v2\/types\/explainable"}],"wp:attachment":[{"href":"https:\/\/www.aicritique.org\/us\/wp-json\/wp\/v2\/media?parent=1062"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}